summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--2.3-1/etc/scilab2c.start1
-rw-r--r--2.3-1/includes/sci2clib.h2
-rw-r--r--2.3-1/macros/CCodeGeneration/C_GenerateMakefile.binbin30200 -> 32812 bytes
-rw-r--r--2.3-1/macros/CCodeGeneration/C_GenerateMakefile.sci17
-rw-r--r--2.3-1/macros/ImageProcessing/buildmacros.sce10
-rw-r--r--2.3-1/macros/ImageProcessing/core/CV_CreateImage.binbin3564 -> 3604 bytes
-rw-r--r--2.3-1/macros/ImageProcessing/core/CV_CreateImage.sci2
-rw-r--r--2.3-1/macros/ImageProcessing/core/CV_GetImgSize.binbin0 -> 2764 bytes
-rw-r--r--2.3-1/macros/ImageProcessing/core/CV_GetImgSize.sci38
-rw-r--r--2.3-1/macros/ImageProcessing/core/libbin676 -> 700 bytes
-rw-r--r--2.3-1/macros/ImageProcessing/core/names1
-rw-r--r--2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.binbin0 -> 4224 bytes
-rw-r--r--2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.sci43
-rw-r--r--2.3-1/macros/ImageProcessing/imgproc/buildmacros.sce15
-rw-r--r--2.3-1/macros/ImageProcessing/imgproc/libbin0 -> 688 bytes
-rw-r--r--2.3-1/macros/ImageProcessing/imgproc/names1
-rw-r--r--2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.binbin1236688 -> 1243008 bytes
-rw-r--r--2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.sci55
-rw-r--r--2.3-1/macros/findDeps/getAllHeaders.binbin32576 -> 32772 bytes
-rw-r--r--2.3-1/macros/findDeps/getAllHeaders.sci3
-rw-r--r--2.3-1/macros/findDeps/getAllInterfaces.binbin33876 -> 34096 bytes
-rw-r--r--2.3-1/macros/findDeps/getAllInterfaces.sci3
-rw-r--r--2.3-1/macros/findDeps/getAllLibraries.binbin2076 -> 7740 bytes
-rw-r--r--2.3-1/macros/findDeps/getAllLibraries.sci28
-rw-r--r--2.3-1/macros/findDeps/getAllSources.binbin193416 -> 193844 bytes
-rw-r--r--2.3-1/macros/findDeps/getAllSources.sci4
-rw-r--r--2.3-1/macros/runsci2c.binbin30896 -> 31976 bytes
-rw-r--r--2.3-1/macros/runsci2c.sci9
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/libjasper.abin0 -> 1029234 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/libjpeg.abin0 -> 1119888 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/libtiff.abin0 -> 1380758 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.sobin0 -> 3629128 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.sobin0 -> 6065088 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.sobin0 -> 7376312 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.sobin0 -> 4200032 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.sobin0 -> 2697296 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.sobin0 -> 2619680 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.sobin0 -> 7838352 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.sobin0 -> 7284984 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.sobin0 -> 6518888 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.sobin0 -> 1971600 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.sobin0 -> 1091828 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.sobin0 -> 3734856 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.sobin0 -> 7169952 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.sobin0 -> 410344 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.sobin0 -> 3714560 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.sobin0 -> 1213364 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.abin0 -> 8438392 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.sobin0 -> 1385940 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.sobin0 -> 1473068 bytes
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp811
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp998
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp106
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp220
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp405
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp354
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp513
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp4924
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h1886
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp199
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp280
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp564
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp795
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp2625
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp284
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp300
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp4123
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h1923
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp72
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp621
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp1616
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h155
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h188
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h318
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h595
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h201
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h38
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h176
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h937
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h45
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h159
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp427
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp301
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h52
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h94
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h231
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h165
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h776
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h318
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_index.h628
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h641
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h1133
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h139
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h130
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h420
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h497
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h116
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp163
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h184
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h91
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h99
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h133
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h543
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h81
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/saving.h187
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h186
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/timer.h93
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp203
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp714
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp301
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp118
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp105
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp2219
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp361
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp498
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp395
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp187
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp117
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp80
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp138
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp278
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp71
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp789
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp122
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp197
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp284
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp250
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp909
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp67
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp67
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp82
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp213
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp224
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp922
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp280
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp131
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp68
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp145
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp2530
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp65
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h171
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp255
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h660
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h49
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp1299
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h623
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h640
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp948
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp740
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp3436
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp92
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp41
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp2147
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp155
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp128
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp57
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp140
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp1073
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/matrix_operations.hpp490
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/ocl.hpp1998
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv.hpp83
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv_modules.hpp29
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo.hpp91
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo_c.h69
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/autocalib.hpp65
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/blenders.hpp137
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/camera.hpp69
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/exposure_compensate.hpp106
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/matchers.hpp192
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/motion_estimators.hpp205
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/seam_finders.hpp267
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util.hpp162
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util_inl.hpp127
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers.hpp510
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers_inl.hpp765
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/stitcher.hpp174
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/warpers.hpp170
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/optical_flow.hpp76
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/superres.hpp99
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_perf.hpp115
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_test.hpp360
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts.hpp638
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_gtest.h20125
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_perf.hpp618
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/background_segm.hpp263
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/tracking.hpp373
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/video.hpp58
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab.hpp43
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/deblurring.hpp110
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching.hpp103
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching_inl.hpp166
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/frame_source.hpp91
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/global_motion.hpp141
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/inpainting.hpp200
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/log.hpp75
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/motion_stabilizing.hpp106
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/optical_flow.hpp120
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/stabilizer.hpp187
-rw-r--r--2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/videostab.hpp48
-rw-r--r--2.3-1/src/c/imageProcessing/cvcore/imcvCreateImages.c2
-rw-r--r--2.3-1/src/c/imageProcessing/cvcore/imcvGetImgSizes.c32
-rw-r--r--2.3-1/src/c/imageProcessing/cvimgproc/imcvCvtColors.c27
-rw-r--r--2.3-1/src/c/imageProcessing/cvimgproc/imcvThresholds.c27
-rw-r--r--2.3-1/src/c/imageProcessing/includes/cvcore.h2
-rw-r--r--2.3-1/src/c/imageProcessing/includes/cvhighgui.h2
-rw-r--r--2.3-1/src/c/imageProcessing/includes/cvimgproc.h31
-rw-r--r--2.3-1/src/c/imageProcessing/interfaces/int_cvcore.h3
-rw-r--r--2.3-1/src/c/imageProcessing/interfaces/int_cvhighgui.h2
-rw-r--r--2.3-1/src/c/imageProcessing/interfaces/int_cvimgproc.h32
224 files changed, 90873 insertions, 31 deletions
diff --git a/2.3-1/etc/scilab2c.start b/2.3-1/etc/scilab2c.start
index 07a95e10..59cf0865 100644
--- a/2.3-1/etc/scilab2c.start
+++ b/2.3-1/etc/scilab2c.start
@@ -25,6 +25,7 @@ sci2c_RPilib = lib(pathmacros + "Hardware\RasberryPi"+ filesep());
sci2c_ScilabArduinolib = lib(pathmacros + "Scilab-Arduino" + filesep());
sci2c_CVCorelib = lib(pathmacros + "ImageProcessing\core" + filesep());
sci2c_CVHighguilib = lib(pathmacros + "ImageProcessing\highgui" + filesep());
+sci2c_CVImgproclib = lib(pathmacros + "ImageProcessing\imgproc" + filesep());
clear pathmacros;
// Load and add help chapter
diff --git a/2.3-1/includes/sci2clib.h b/2.3-1/includes/sci2clib.h
index e4eac2db..3567ca78 100644
--- a/2.3-1/includes/sci2clib.h
+++ b/2.3-1/includes/sci2clib.h
@@ -411,6 +411,8 @@
#include "int_cvcore.h"
#include "cvhighgui.h"
#include "int_cvhighgui.h"
+#include "cvimgproc.h"
+#include "int_cvimgproc.h"
/*Scilab-arduino toolbox*/
#ifdef Arduino1
diff --git a/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.bin b/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.bin
index efa5cd65..ada2bb27 100644
--- a/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.bin
+++ b/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.bin
Binary files differ
diff --git a/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.sci b/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.sci
index a9156168..f184c39c 100644
--- a/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.sci
+++ b/2.3-1/macros/CCodeGeneration/C_GenerateMakefile.sci
@@ -66,11 +66,21 @@ else
if (target == 'RPi')
PrintStringInfo('CC = arm-linux-gnueabihf-gcc ',FileInfo.MakefileFilename,'file','y','y');
PrintStringInfo('CFLAGS = -Wall -pedantic -g -I $(HSRCDIR) -I $(ISRCDIR) -L $(LIBDIR)',FileInfo.MakefileFilename,'file','y','y');
- PrintStringInfo('LDFLAGS = -llapack -lrefblas -lgfortran -lm -lwiringPi',FileInfo.MakefileFilename,'file','y','y');
+ PrintStringInfo('LDFLAGS = -llapack -lrefblas -lgfortran -lwiringPi',FileInfo.MakefileFilename,'file','y','y');
+ if(SharedInfo.OpenCVUsed == %T)
+ PrintStringInfo('LDFLAGS += -lopencv_calib3d -lopencv_contrib -lopencv_core -lopencv_features2d -lopencv_flann',FileInfo.MakefileFilename,'file','y','y');
+ PrintStringInfo('LDFLAGS += -lopencv_gpu -lopencv_highgui -lopencv_imgproc -lopencv_legacy -lopencv_ml',FileInfo.MakefileFilename,'file','y','y');
+ PrintStringInfo('LDFLAGS += -lopencv_nonfree -lopencv_objdetect -lopencv_ocl -lopencv_photo -lopencv_stitching',FileInfo.MakefileFilename,'file','y','y');
+ PrintStringInfo('LDFLAGS += -lopencv_superres -lopencv_ts -lopencv_video -lopencv_videostab -lrt -lpthread -lm -ldl', FileInfo.MakefileFilename,'file','y','y');
+ end
else
PrintStringInfo('CC = gcc',FileInfo.MakefileFilename,'file','y','y');
PrintStringInfo('CFLAGS = -Wall -pedantic -g -I $(HSRCDIR) -I $(ISRCDIR) -L $(LIBDIR)',FileInfo.MakefileFilename,'file','y','y');
PrintStringInfo('LDFLAGS = -lblas -llapack -lm ',FileInfo.MakefileFilename,'file','y','y');
+ if(SharedInfo.OpenCVUsed == %T)
+ PrintStringInfo('LDFLAGS += `pkg-config --libs opencv`',FileInfo.MakefileFilename,'file','y','y');
+ PrintStringInfo('CFLAGS += `pkg-config --cflags opencv`',FileInfo.MakefileFilename,'file','y','y');
+ end
end
end
@@ -81,11 +91,6 @@ if(size(SharedInfo.Includelist) <> 0)
end
end
-if(SharedInfo.OpenCVUsed == %T)
- PrintStringInfo('LDFLAGS += `pkg-config --libs opencv`',FileInfo.MakefileFilename,'file','y','y');
- PrintStringInfo('CFLAGS += `pkg-config --cflags opencv`',FileInfo.MakefileFilename,'file','y','y');
-end
-
// Binary definition
PrintStringInfo('EXEFILENAME = '+SharedInfo.SCIMainFunName,FileInfo.MakefileFilename,'file','y','y');
PrintStringInfo('EXEFILE = $(SCI2CDIR)/$(EXEFILENAME)', FileInfo.MakefileFilename,'file','y','y');
diff --git a/2.3-1/macros/ImageProcessing/buildmacros.sce b/2.3-1/macros/ImageProcessing/buildmacros.sce
index 14c7c1b1..6431a7df 100644
--- a/2.3-1/macros/ImageProcessing/buildmacros.sce
+++ b/2.3-1/macros/ImageProcessing/buildmacros.sce
@@ -9,8 +9,9 @@
// Organization: FOSSEE, IIT Bombay
// Email: toolbox@scilab.in
-OpencvDirs = [ "core", ...
- "highgui"];
+OpencvDirs = [ "core", ...
+ "highgui", ...
+ "imgproc"];
current_path = get_absolute_file_path("buildmacros.sce");
@@ -23,8 +24,3 @@ OpencvDirs = [ "core", ...
end
clear current_path;
-
-tbx_build_macros(TOOLBOX_NAME, get_absolute_file_path('buildmacros.sce'));
-
-clear tbx_build_macros;
-
diff --git a/2.3-1/macros/ImageProcessing/core/CV_CreateImage.bin b/2.3-1/macros/ImageProcessing/core/CV_CreateImage.bin
index 4c638cc2..d5aa3080 100644
--- a/2.3-1/macros/ImageProcessing/core/CV_CreateImage.bin
+++ b/2.3-1/macros/ImageProcessing/core/CV_CreateImage.bin
Binary files differ
diff --git a/2.3-1/macros/ImageProcessing/core/CV_CreateImage.sci b/2.3-1/macros/ImageProcessing/core/CV_CreateImage.sci
index 49649201..72dd9b8c 100644
--- a/2.3-1/macros/ImageProcessing/core/CV_CreateImage.sci
+++ b/2.3-1/macros/ImageProcessing/core/CV_CreateImage.sci
@@ -13,7 +13,7 @@ function img = CV_CreateImage(width,height,bit_depth,no_of_channels)
// This function can be used to create opencv image object. For more info
// about bit depth and channels,please refer to OpenCV documentation
// Examples
-// CV_CreateImage([320 240], "8U", 1) //to create image of the size 320*240
+// CV_CreateImage([320 240], "IPL_DEPTH_8U", 1) //to create image of the size 320*240
// pixels with 8 bit unsigned each pixels and gray scale image
//
// See also
diff --git a/2.3-1/macros/ImageProcessing/core/CV_GetImgSize.bin b/2.3-1/macros/ImageProcessing/core/CV_GetImgSize.bin
new file mode 100644
index 00000000..ede60e6f
--- /dev/null
+++ b/2.3-1/macros/ImageProcessing/core/CV_GetImgSize.bin
Binary files differ
diff --git a/2.3-1/macros/ImageProcessing/core/CV_GetImgSize.sci b/2.3-1/macros/ImageProcessing/core/CV_GetImgSize.sci
new file mode 100644
index 00000000..cd7ebdaf
--- /dev/null
+++ b/2.3-1/macros/ImageProcessing/core/CV_GetImgSize.sci
@@ -0,0 +1,38 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Siddhesh Wani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+function imgsize = CV_GetImgSize(img)
+// function to get size of the image (width*height)
+//
+// Calling Sequence
+// CV_GetImgSize(img)
+//
+// Parameters
+// img: image whose size is to be returned
+//
+// Description
+// This function can be used for retrieving size information of the image.
+// It returns an array with the first element as width and the second as height
+// Examples
+// img = CV_LoadImage('~/test.jpg',0)
+// size = CV_GetImgSize(img)
+//
+// See also
+// CV_LoadImage CV_CreateImage
+//
+// Authors
+// Siddhesh Wani
+//
+imgsize = [0 0];
+// This is currently a dummy function. It provides no functionality but is required
+// for providing support for generating C code for OpenCV
+
+endfunction
diff --git a/2.3-1/macros/ImageProcessing/core/lib b/2.3-1/macros/ImageProcessing/core/lib
index 4bccbc8b..8d5e5c74 100644
--- a/2.3-1/macros/ImageProcessing/core/lib
+++ b/2.3-1/macros/ImageProcessing/core/lib
Binary files differ
diff --git a/2.3-1/macros/ImageProcessing/core/names b/2.3-1/macros/ImageProcessing/core/names
index a681f790..f56c951c 100644
--- a/2.3-1/macros/ImageProcessing/core/names
+++ b/2.3-1/macros/ImageProcessing/core/names
@@ -1 +1,2 @@
CV_CreateImage
+CV_GetImgSize
diff --git a/2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.bin b/2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.bin
new file mode 100644
index 00000000..6ca6ea46
--- /dev/null
+++ b/2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.bin
Binary files differ
diff --git a/2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.sci b/2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.sci
new file mode 100644
index 00000000..8baa3865
--- /dev/null
+++ b/2.3-1/macros/ImageProcessing/imgproc/CV_CvtColor.sci
@@ -0,0 +1,43 @@
+// Copyright (C) 2016 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Siddhesh Wani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+function status = CV_CvtColor(srcimg,dstimg,code)
+// function to convert image from one colorspace to other colorspace
+//
+// Calling Sequence
+// CV_CvtColor(srcimg,dstimg,code,dstCn)
+//
+// Parameters
+// srcimg: source image to be converted
+// dstimg: destination image in which to store converted image
+// code: String specifying the conversion type. Same as defined in OpenCV,
+// e.g. 'CV_RGB2GRAY' for conversion from an RGB image to a grayscale image
+// dstCn: no of channels in destination image (0 by default)
+//
+// Description
+// This function can be used for converting an image to other colorspace.
+// Refer OpenCV documentation for list of available conversions
+// Examples
+// img = CV_LoadImage('~/test.jpg',0)
+// dst = CV_CreateImage(320,240,"IPL_DEPTH_8U",1)
+// CV_CvtColor(img,dst,'CV_RGB2GRAY')
+//
+// See also
+// CV_LoadImage CV_CreateImage
+//
+// Authors
+// Siddhesh Wani
+//
+status = 0;
+// This is currently a dummy function. It provides no functionality but is required
+// for providing support for generating C code for OpenCV
+
+endfunction
diff --git a/2.3-1/macros/ImageProcessing/imgproc/buildmacros.sce b/2.3-1/macros/ImageProcessing/imgproc/buildmacros.sce
new file mode 100644
index 00000000..60fd2843
--- /dev/null
+++ b/2.3-1/macros/ImageProcessing/imgproc/buildmacros.sce
@@ -0,0 +1,15 @@
+//
+// Scilab ( http://www.scilab.org/ ) - This file is part of Scilab
+// Copyright (C) 2009-2009 - DIGITEO - Bruno JOFRET
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+//
+//
+
+tbx_build_macros(TOOLBOX_NAME, get_absolute_file_path('buildmacros.sce'));
+
+clear tbx_build_macros;
diff --git a/2.3-1/macros/ImageProcessing/imgproc/lib b/2.3-1/macros/ImageProcessing/imgproc/lib
new file mode 100644
index 00000000..601946ed
--- /dev/null
+++ b/2.3-1/macros/ImageProcessing/imgproc/lib
Binary files differ
diff --git a/2.3-1/macros/ImageProcessing/imgproc/names b/2.3-1/macros/ImageProcessing/imgproc/names
new file mode 100644
index 00000000..537af898
--- /dev/null
+++ b/2.3-1/macros/ImageProcessing/imgproc/names
@@ -0,0 +1 @@
+CV_CvtColor
diff --git a/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.bin b/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.bin
index ab16036f..8e238866 100644
--- a/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.bin
+++ b/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.bin
Binary files differ
diff --git a/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.sci b/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.sci
index 5e4eb036..ac8d823c 100644
--- a/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.sci
+++ b/2.3-1/macros/ToolInitialization/INIT_FillSCI2LibCDirs.sci
@@ -5401,7 +5401,6 @@ ClassName = 'DIFF';
PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
-//Arguements specified: initial value, start time, time vector, ode function
PrintStringInfo('NIN= 1',ClassFileName,'file','y');
PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
PrintStringInfo('OUT(1).TP= IN(1).TP',ClassFileName,'file','y');
@@ -5467,7 +5466,6 @@ ClassName = 'NORM';
PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
-//Arguements specified: initial value, start time, time vector, ode function
PrintStringInfo('NIN= 1',ClassFileName,'file','y');
PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
PrintStringInfo('OUT(1).TP= IN(1).TP',ClassFileName,'file','y');
@@ -5509,7 +5507,6 @@ ClassName = 'CONVSTR';
PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
-//Arguements specified: initial value, start time, time vector, ode function
PrintStringInfo('NIN= 1',ClassFileName,'file','y');
PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
PrintStringInfo('OUT(1).TP= IN(1).TP',ClassFileName,'file','y');
@@ -5544,7 +5541,6 @@ ClassName = 'CV_CreateImage';
PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
-//Arguements specified: initial value, start time, time vector, ode function
PrintStringInfo('NIN= 4',ClassFileName,'file','y');
PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
PrintStringInfo('OUT(1).TP= ''im''',ClassFileName,'file','y');
@@ -5569,7 +5565,6 @@ ClassName = 'CV_LoadImage';
PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
-//Arguements specified: initial value, start time, time vector, ode function
PrintStringInfo('NIN= 2',ClassFileName,'file','y');
PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
PrintStringInfo('OUT(1).TP= ''im''',ClassFileName,'file','y');
@@ -5594,7 +5589,6 @@ ClassName = 'CV_ShowImage';
PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
-//Arguements specified: initial value, start time, time vector, ode function
PrintStringInfo('NIN= 2',ClassFileName,'file','y');
PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
PrintStringInfo('OUT(1).TP= ''u8''',ClassFileName,'file','y');
@@ -5631,7 +5625,6 @@ ClassName = 'CV_WaitKey';
PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
-//Arguements specified: initial value, start time, time vector, ode function
PrintStringInfo('NIN= 1',ClassFileName,'file','y');
PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
PrintStringInfo('OUT(1).TP= ''u8''',ClassFileName,'file','y');
@@ -5647,6 +5640,54 @@ PrintStringInfo(' Adding Function: '+FunctionName+'.',GeneralReport,'file',
INIT_GenAnnFLFunctions(FunctionName,SCI2CLibCAnnFunDir,ClassName,GeneralReport,ExtensionCAnnFun);
INIT_GenAnnFLFunctions(FunctionName,SCI2CLibCFLFunDir,ClassName,GeneralReport,ExtensionCFuncListFun);
+//------------------------------------
+//---- Class CV_CvtColor -------------
+//------------------------------------
+ClassName = 'CV_CvtColor';
+
+// --- Class Annotation. ---
+PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
+ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
+
+PrintStringInfo('NIN= 3',ClassFileName,'file','y');
+PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
+PrintStringInfo('OUT(1).TP= ''u8''',ClassFileName,'file','y');
+PrintStringInfo('OUT(1).SZ(1)= ''1''',ClassFileName,'file','y');
+PrintStringInfo('OUT(1).SZ(2)= ''1''',ClassFileName,'file','y');
+
+ClassFileName = fullfile(SCI2CLibCFLClsDir,ClassName+ExtensionCFuncListCls);
+PrintStringInfo('im0im0g2'+ArgSeparator+'u80',ClassFileName,'file','y');
+
+// --- Annotation Function And Function List Function. ---
+FunctionName = 'CV_CvtColor';
+PrintStringInfo(' Adding Function: '+FunctionName+'.',GeneralReport,'file','y');
+INIT_GenAnnFLFunctions(FunctionName,SCI2CLibCAnnFunDir,ClassName,GeneralReport,ExtensionCAnnFun);
+INIT_GenAnnFLFunctions(FunctionName,SCI2CLibCFLFunDir,ClassName,GeneralReport,ExtensionCFuncListFun);
+
+//------------------------------------
+//---- Class CV_GetImgSize -----------
+//------------------------------------
+ClassName = 'CV_GetImgSize';
+
+// --- Class Annotation. ---
+PrintStringInfo(' Adding Class: '+ClassName+'.',GeneralReport,'file','y');
+ClassFileName = fullfile(SCI2CLibCAnnClsDir,ClassName+ExtensionCAnnCls);
+
+PrintStringInfo('NIN= 1',ClassFileName,'file','y');
+PrintStringInfo('NOUT= 1',ClassFileName,'file','y');
+PrintStringInfo('OUT(1).TP= ''d''',ClassFileName,'file','y');
+PrintStringInfo('OUT(1).SZ(1)= ''1''',ClassFileName,'file','y');
+PrintStringInfo('OUT(1).SZ(2)= ''2''',ClassFileName,'file','y');
+
+ClassFileName = fullfile(SCI2CLibCFLClsDir,ClassName+ExtensionCFuncListCls);
+PrintStringInfo('im0'+ArgSeparator+'d2',ClassFileName,'file','y');
+
+// --- Annotation Function And Function List Function. ---
+FunctionName = 'CV_GetImgSize';
+PrintStringInfo(' Adding Function: '+FunctionName+'.',GeneralReport,'file','y');
+INIT_GenAnnFLFunctions(FunctionName,SCI2CLibCAnnFunDir,ClassName,GeneralReport,ExtensionCAnnFun);
+INIT_GenAnnFLFunctions(FunctionName,SCI2CLibCFLFunDir,ClassName,GeneralReport,ExtensionCFuncListFun);
+
// ////////////////////////////////////////////
// /////PARTE INTRODOTTA DA ALBERTO MOREA
// /////////////////////////////////////////////
diff --git a/2.3-1/macros/findDeps/getAllHeaders.bin b/2.3-1/macros/findDeps/getAllHeaders.bin
index 2dad8e88..e934bc35 100644
--- a/2.3-1/macros/findDeps/getAllHeaders.bin
+++ b/2.3-1/macros/findDeps/getAllHeaders.bin
Binary files differ
diff --git a/2.3-1/macros/findDeps/getAllHeaders.sci b/2.3-1/macros/findDeps/getAllHeaders.sci
index 846f8f6a..06963ca6 100644
--- a/2.3-1/macros/findDeps/getAllHeaders.sci
+++ b/2.3-1/macros/findDeps/getAllHeaders.sci
@@ -182,7 +182,8 @@ function allHeaders = getAllHeaders(SharedInfo)
OpenCV_headers = [
"src/c/imageProcessing/includes/cvcore.h"
- "src/c/imageProcessing/includes/cvhighgui.h"];
+ "src/c/imageProcessing/includes/cvhighgui.h"
+ "src/c/imageProcessing/includes/cvimgproc.h"];
if Target == "StandAlone"
allHeaders = Standalone_headers;
diff --git a/2.3-1/macros/findDeps/getAllInterfaces.bin b/2.3-1/macros/findDeps/getAllInterfaces.bin
index ee40b128..c46573a8 100644
--- a/2.3-1/macros/findDeps/getAllInterfaces.bin
+++ b/2.3-1/macros/findDeps/getAllInterfaces.bin
Binary files differ
diff --git a/2.3-1/macros/findDeps/getAllInterfaces.sci b/2.3-1/macros/findDeps/getAllInterfaces.sci
index 90975473..0f90d5cc 100644
--- a/2.3-1/macros/findDeps/getAllInterfaces.sci
+++ b/2.3-1/macros/findDeps/getAllInterfaces.sci
@@ -173,7 +173,8 @@ function allInterfaces = getAllInterfaces(SharedInfo)
OpenCV_interfaces = [
"src/c/imageProcessing/interfaces/int_cvcore.h"
- "src/c/imageProcessing/interfaces/int_cvhighgui.h"];
+ "src/c/imageProcessing/interfaces/int_cvhighgui.h"
+ "src/c/imageProcessing/interfaces/int_cvimgproc.h"];
if Target == "StandAlone"
allInterfaces = Standalone_interfaces;
diff --git a/2.3-1/macros/findDeps/getAllLibraries.bin b/2.3-1/macros/findDeps/getAllLibraries.bin
index c6d006fa..f016eba2 100644
--- a/2.3-1/macros/findDeps/getAllLibraries.bin
+++ b/2.3-1/macros/findDeps/getAllLibraries.bin
Binary files differ
diff --git a/2.3-1/macros/findDeps/getAllLibraries.sci b/2.3-1/macros/findDeps/getAllLibraries.sci
index 7cd8f912..2850abad 100644
--- a/2.3-1/macros/findDeps/getAllLibraries.sci
+++ b/2.3-1/macros/findDeps/getAllLibraries.sci
@@ -10,8 +10,34 @@ function allLibraries = getAllLibraries(SharedInfo)
"src/c/hardware/rasberrypi/libraries/libgsl.a"
];
+ RPi_cvlibs = [
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so"
+ "src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a"
+];
+
if Target == "RPi"
- allLibraries = RPi_libs;
+ allLibraries = RPi_libs;
+ if (SharedInfo.OpenCVUsed == %T)
+ allLibraries = cat(1,allLibraries,RPi_cvlibs)
+ end
+
else
allLibraries = [];
end
diff --git a/2.3-1/macros/findDeps/getAllSources.bin b/2.3-1/macros/findDeps/getAllSources.bin
index 974e9460..a6612baf 100644
--- a/2.3-1/macros/findDeps/getAllSources.bin
+++ b/2.3-1/macros/findDeps/getAllSources.bin
Binary files differ
diff --git a/2.3-1/macros/findDeps/getAllSources.sci b/2.3-1/macros/findDeps/getAllSources.sci
index dafa5032..14326b6b 100644
--- a/2.3-1/macros/findDeps/getAllSources.sci
+++ b/2.3-1/macros/findDeps/getAllSources.sci
@@ -1040,8 +1040,10 @@ function allSources = getAllSources(SharedInfo)
OpenCV_files = [
"src/c/imageProcessing/cvcore/imcvCreateImages.c"
+ "src/c/imageProcessing/cvcore/imcvGetImgSizes.c"
"src/c/imageProcessing/cvhighgui/imcvLoadImages.c"
- "src/c/imageProcessing/cvhighgui/imcvShowImages.c"];
+ "src/c/imageProcessing/cvhighgui/imcvShowImages.c"
+ "src/c/imageProcessing/cvimgproc/imcvCvtColors.c"];
if Target == "StandAlone"
allSources = Standalone_files;
diff --git a/2.3-1/macros/runsci2c.bin b/2.3-1/macros/runsci2c.bin
index 62928248..253cc785 100644
--- a/2.3-1/macros/runsci2c.bin
+++ b/2.3-1/macros/runsci2c.bin
Binary files differ
diff --git a/2.3-1/macros/runsci2c.sci b/2.3-1/macros/runsci2c.sci
index b0bb16b8..9301421f 100644
--- a/2.3-1/macros/runsci2c.sci
+++ b/2.3-1/macros/runsci2c.sci
@@ -147,10 +147,16 @@ if(~isempty(allLibraries))
PrintStepInfo('Copying libraries', FileInfo.GeneralReport,'both');
for i = 1:size(allLibraries, "*")
// DEBUG only
- //disp("Copying "+allInterfaces(i)+" in "+SCI2COutputPath+"/interfaces/");
+ //disp("Copying "+allLibraries(i)+" in "+SCI2COutputPath+"/libraries/");
copyfile(allLibraries(i), SCI2COutputPath+"/libraries/");
end
end
+
+//Copy folder containing opencv include files in Includes folder
+if((Target == 'RPi') & (SharedInfo.OpenCVUsed == %T))
+ copyfile(SCI2CHOME + "/" +'src/c/hardware/rasberrypi/libraries/opencv/opencv2/',SCI2COutputPath+"/includes/opencv2")
+end
+
// --------------------------
// --- Generate Makefile. ---
// --------------------------
@@ -185,6 +191,7 @@ else
end
+
// ------------------------------
// --- Generate SCI2C Header. ---
// ------------------------------
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/libjasper.a b/2.3-1/src/c/hardware/rasberrypi/libraries/libjasper.a
new file mode 100644
index 00000000..6867f832
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/libjasper.a
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/libjpeg.a b/2.3-1/src/c/hardware/rasberrypi/libraries/libjpeg.a
new file mode 100644
index 00000000..4716fd80
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/libjpeg.a
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/libtiff.a b/2.3-1/src/c/hardware/rasberrypi/libraries/libtiff.a
new file mode 100644
index 00000000..743428e6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/libtiff.a
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so
new file mode 100644
index 00000000..357be83f
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_calib3d.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so
new file mode 100644
index 00000000..642ed003
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_contrib.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so
new file mode 100644
index 00000000..900f46da
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_core.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so
new file mode 100644
index 00000000..b4d42966
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_features2d.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so
new file mode 100644
index 00000000..b4af830a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_flann.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so
new file mode 100644
index 00000000..c30af67d
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_gpu.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so
new file mode 100644
index 00000000..9a502296
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_highgui.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so
new file mode 100644
index 00000000..28bd1611
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_imgproc.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so
new file mode 100644
index 00000000..37d231f7
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_legacy.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so
new file mode 100644
index 00000000..b31bf423
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ml.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so
new file mode 100644
index 00000000..53fdba22
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_nonfree.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so
new file mode 100644
index 00000000..2fd44680
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_objdetect.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so
new file mode 100644
index 00000000..6543e943
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ocl.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so
new file mode 100644
index 00000000..4ef7a4e3
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_photo.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so
new file mode 100644
index 00000000..8e4ed8e9
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_stitching.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so
new file mode 100644
index 00000000..02335315
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_superres.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a
new file mode 100644
index 00000000..aea7e97d
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_ts.a
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so
new file mode 100644
index 00000000..9c05c5d2
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_video.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so
new file mode 100644
index 00000000..a44efbc6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/libopencv_videostab.so
Binary files differ
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp
new file mode 100644
index 00000000..7356c151
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/calib3d/calib3d.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp
new file mode 100644
index 00000000..5e9cde8e
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/calib3d/calib3d.hpp
@@ -0,0 +1,811 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CALIB3D_HPP__
+#define __OPENCV_CALIB3D_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/core/affine.hpp"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************************\
+* Camera Calibration, Pose Estimation and Stereo *
+\****************************************************************************************/
+
+typedef struct CvPOSITObject CvPOSITObject;
+
+/* Allocates and initializes CvPOSITObject structure before doing cvPOSIT */
+CVAPI(CvPOSITObject*) cvCreatePOSITObject( CvPoint3D32f* points, int point_count );
+
+
+/* Runs POSIT (POSe from ITeration) algorithm for determining 3d position of
+ an object given its model and projection in a weak-perspective case */
+CVAPI(void) cvPOSIT( CvPOSITObject* posit_object, CvPoint2D32f* image_points,
+ double focal_length, CvTermCriteria criteria,
+ float* rotation_matrix, float* translation_vector);
+
+/* Releases CvPOSITObject structure */
+CVAPI(void) cvReleasePOSITObject( CvPOSITObject** posit_object );
+
+/* updates the number of RANSAC iterations */
+CVAPI(int) cvRANSACUpdateNumIters( double p, double err_prob,
+ int model_points, int max_iters );
+
+CVAPI(void) cvConvertPointsHomogeneous( const CvMat* src, CvMat* dst );
+
+/* Calculates fundamental matrix given a set of corresponding points */
+#define CV_FM_7POINT 1
+#define CV_FM_8POINT 2
+
+#define CV_LMEDS 4
+#define CV_RANSAC 8
+
+#define CV_FM_LMEDS_ONLY CV_LMEDS
+#define CV_FM_RANSAC_ONLY CV_RANSAC
+#define CV_FM_LMEDS CV_LMEDS
+#define CV_FM_RANSAC CV_RANSAC
+
+enum
+{
+ CV_ITERATIVE = 0,
+ CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
+ CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
+};
+
+CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
+ CvMat* fundamental_matrix,
+ int method CV_DEFAULT(CV_FM_RANSAC),
+ double param1 CV_DEFAULT(3.), double param2 CV_DEFAULT(0.99),
+ CvMat* status CV_DEFAULT(NULL) );
+
+/* For each input point on one of images
+ computes parameters of the corresponding
+ epipolar line on the other image */
+CVAPI(void) cvComputeCorrespondEpilines( const CvMat* points,
+ int which_image,
+ const CvMat* fundamental_matrix,
+ CvMat* correspondent_lines );
+
+/* Triangulation functions */
+
+CVAPI(void) cvTriangulatePoints(CvMat* projMatr1, CvMat* projMatr2,
+ CvMat* projPoints1, CvMat* projPoints2,
+ CvMat* points4D);
+
+CVAPI(void) cvCorrectMatches(CvMat* F, CvMat* points1, CvMat* points2,
+ CvMat* new_points1, CvMat* new_points2);
+
+
+/* Computes the optimal new camera matrix according to the free scaling parameter alpha:
+ alpha=0 - only valid pixels will be retained in the undistorted image
+ alpha=1 - all the source image pixels will be retained in the undistorted image
+*/
+CVAPI(void) cvGetOptimalNewCameraMatrix( const CvMat* camera_matrix,
+ const CvMat* dist_coeffs,
+ CvSize image_size, double alpha,
+ CvMat* new_camera_matrix,
+ CvSize new_imag_size CV_DEFAULT(cvSize(0,0)),
+ CvRect* valid_pixel_ROI CV_DEFAULT(0),
+ int center_principal_point CV_DEFAULT(0));
+
+/* Converts rotation vector to rotation matrix or vice versa */
+CVAPI(int) cvRodrigues2( const CvMat* src, CvMat* dst,
+ CvMat* jacobian CV_DEFAULT(0) );
+
+/* Finds perspective transformation between the object plane and image (view) plane */
+CVAPI(int) cvFindHomography( const CvMat* src_points,
+ const CvMat* dst_points,
+ CvMat* homography,
+ int method CV_DEFAULT(0),
+ double ransacReprojThreshold CV_DEFAULT(3),
+ CvMat* mask CV_DEFAULT(0));
+
+/* Computes RQ decomposition for 3x3 matrices */
+CVAPI(void) cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
+ CvMat *matrixQx CV_DEFAULT(NULL),
+ CvMat *matrixQy CV_DEFAULT(NULL),
+ CvMat *matrixQz CV_DEFAULT(NULL),
+ CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
+
+/* Computes projection matrix decomposition */
+CVAPI(void) cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
+ CvMat *rotMatr, CvMat *posVect,
+ CvMat *rotMatrX CV_DEFAULT(NULL),
+ CvMat *rotMatrY CV_DEFAULT(NULL),
+ CvMat *rotMatrZ CV_DEFAULT(NULL),
+ CvPoint3D64f *eulerAngles CV_DEFAULT(NULL));
+
+/* Computes d(AB)/dA and d(AB)/dB */
+CVAPI(void) cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB );
+
+/* Computes r3 = rodrigues(rodrigues(r2)*rodrigues(r1)),
+ t3 = rodrigues(r2)*t1 + t2 and the respective derivatives */
+CVAPI(void) cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
+ const CvMat* _rvec2, const CvMat* _tvec2,
+ CvMat* _rvec3, CvMat* _tvec3,
+ CvMat* dr3dr1 CV_DEFAULT(0), CvMat* dr3dt1 CV_DEFAULT(0),
+ CvMat* dr3dr2 CV_DEFAULT(0), CvMat* dr3dt2 CV_DEFAULT(0),
+ CvMat* dt3dr1 CV_DEFAULT(0), CvMat* dt3dt1 CV_DEFAULT(0),
+ CvMat* dt3dr2 CV_DEFAULT(0), CvMat* dt3dt2 CV_DEFAULT(0) );
+
+/* Projects object points to the view plane using
+ the specified extrinsic and intrinsic camera parameters */
+CVAPI(void) cvProjectPoints2( const CvMat* object_points, const CvMat* rotation_vector,
+ const CvMat* translation_vector, const CvMat* camera_matrix,
+ const CvMat* distortion_coeffs, CvMat* image_points,
+ CvMat* dpdrot CV_DEFAULT(NULL), CvMat* dpdt CV_DEFAULT(NULL),
+ CvMat* dpdf CV_DEFAULT(NULL), CvMat* dpdc CV_DEFAULT(NULL),
+ CvMat* dpddist CV_DEFAULT(NULL),
+ double aspect_ratio CV_DEFAULT(0));
+
+/* Finds extrinsic camera parameters from
+ a few known corresponding point pairs and intrinsic parameters */
+CVAPI(void) cvFindExtrinsicCameraParams2( const CvMat* object_points,
+ const CvMat* image_points,
+ const CvMat* camera_matrix,
+ const CvMat* distortion_coeffs,
+ CvMat* rotation_vector,
+ CvMat* translation_vector,
+ int use_extrinsic_guess CV_DEFAULT(0) );
+
+/* Computes initial estimate of the intrinsic camera parameters
+ in case of planar calibration target (e.g. chessboard) */
+CVAPI(void) cvInitIntrinsicParams2D( const CvMat* object_points,
+ const CvMat* image_points,
+ const CvMat* npoints, CvSize image_size,
+ CvMat* camera_matrix,
+ double aspect_ratio CV_DEFAULT(1.) );
+
+#define CV_CALIB_CB_ADAPTIVE_THRESH 1
+#define CV_CALIB_CB_NORMALIZE_IMAGE 2
+#define CV_CALIB_CB_FILTER_QUADS 4
+#define CV_CALIB_CB_FAST_CHECK 8
+
+// Performs a fast check if a chessboard is in the input image. This is a workaround to
+// a problem of cvFindChessboardCorners being slow on images with no chessboard
+// - src: input image
+// - size: chessboard size
+// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
+// 0 if there is no chessboard, -1 in case of error
+CVAPI(int) cvCheckChessboard(IplImage* src, CvSize size);
+
+ /* Detects corners on a chessboard calibration pattern */
+CVAPI(int) cvFindChessboardCorners( const void* image, CvSize pattern_size,
+ CvPoint2D32f* corners,
+ int* corner_count CV_DEFAULT(NULL),
+ int flags CV_DEFAULT(CV_CALIB_CB_ADAPTIVE_THRESH+CV_CALIB_CB_NORMALIZE_IMAGE) );
+
+/* Draws individual chessboard corners or the whole chessboard detected */
+CVAPI(void) cvDrawChessboardCorners( CvArr* image, CvSize pattern_size,
+ CvPoint2D32f* corners,
+ int count, int pattern_was_found );
+
+#define CV_CALIB_USE_INTRINSIC_GUESS 1
+#define CV_CALIB_FIX_ASPECT_RATIO 2
+#define CV_CALIB_FIX_PRINCIPAL_POINT 4
+#define CV_CALIB_ZERO_TANGENT_DIST 8
+#define CV_CALIB_FIX_FOCAL_LENGTH 16
+#define CV_CALIB_FIX_K1 32
+#define CV_CALIB_FIX_K2 64
+#define CV_CALIB_FIX_K3 128
+#define CV_CALIB_FIX_K4 2048
+#define CV_CALIB_FIX_K5 4096
+#define CV_CALIB_FIX_K6 8192
+#define CV_CALIB_RATIONAL_MODEL 16384
+
+/* Finds intrinsic and extrinsic camera parameters
+ from a few views of known calibration pattern */
+CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
+ const CvMat* image_points,
+ const CvMat* point_counts,
+ CvSize image_size,
+ CvMat* camera_matrix,
+ CvMat* distortion_coeffs,
+ CvMat* rotation_vectors CV_DEFAULT(NULL),
+ CvMat* translation_vectors CV_DEFAULT(NULL),
+ int flags CV_DEFAULT(0),
+ CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
+ CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
+
+/* Computes various useful characteristics of the camera from the data computed by
+ cvCalibrateCamera2 */
+CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
+ CvSize image_size,
+ double aperture_width CV_DEFAULT(0),
+ double aperture_height CV_DEFAULT(0),
+ double *fovx CV_DEFAULT(NULL),
+ double *fovy CV_DEFAULT(NULL),
+ double *focal_length CV_DEFAULT(NULL),
+ CvPoint2D64f *principal_point CV_DEFAULT(NULL),
+ double *pixel_aspect_ratio CV_DEFAULT(NULL));
+
+#define CV_CALIB_FIX_INTRINSIC 256
+#define CV_CALIB_SAME_FOCAL_LENGTH 512
+
+/* Computes the transformation from one camera coordinate system to another one
+ from a few correspondent views of the same calibration target. Optionally, calibrates
+ both cameras */
+CVAPI(double) cvStereoCalibrate( const CvMat* object_points, const CvMat* image_points1,
+ const CvMat* image_points2, const CvMat* npoints,
+ CvMat* camera_matrix1, CvMat* dist_coeffs1,
+ CvMat* camera_matrix2, CvMat* dist_coeffs2,
+ CvSize image_size, CvMat* R, CvMat* T,
+ CvMat* E CV_DEFAULT(0), CvMat* F CV_DEFAULT(0),
+ CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
+ CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)),
+ int flags CV_DEFAULT(CV_CALIB_FIX_INTRINSIC));
+
+#define CV_CALIB_ZERO_DISPARITY 1024
+
+/* Computes 3D rotations (+ optional shift) for each camera coordinate system to make both
+ views parallel (=> to make all the epipolar lines horizontal or vertical) */
+CVAPI(void) cvStereoRectify( const CvMat* camera_matrix1, const CvMat* camera_matrix2,
+ const CvMat* dist_coeffs1, const CvMat* dist_coeffs2,
+ CvSize image_size, const CvMat* R, const CvMat* T,
+ CvMat* R1, CvMat* R2, CvMat* P1, CvMat* P2,
+ CvMat* Q CV_DEFAULT(0),
+ int flags CV_DEFAULT(CV_CALIB_ZERO_DISPARITY),
+ double alpha CV_DEFAULT(-1),
+ CvSize new_image_size CV_DEFAULT(cvSize(0,0)),
+ CvRect* valid_pix_ROI1 CV_DEFAULT(0),
+ CvRect* valid_pix_ROI2 CV_DEFAULT(0));
+
+/* Computes rectification transformations for uncalibrated pair of images using a set
+ of point correspondences */
+CVAPI(int) cvStereoRectifyUncalibrated( const CvMat* points1, const CvMat* points2,
+ const CvMat* F, CvSize img_size,
+ CvMat* H1, CvMat* H2,
+ double threshold CV_DEFAULT(5));
+
+
+
+/* stereo correspondence parameters and functions */
+
+#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
+#define CV_STEREO_BM_XSOBEL 1
+
+/* Block matching algorithm structure */
+typedef struct CvStereoBMState
+{
+ // pre-filtering (normalization of input images)
+ int preFilterType; // =CV_STEREO_BM_NORMALIZED_RESPONSE now
+ int preFilterSize; // averaging window size: ~5x5..21x21
+ int preFilterCap; // the output of pre-filtering is clipped by [-preFilterCap,preFilterCap]
+
+ // correspondence using Sum of Absolute Difference (SAD)
+ int SADWindowSize; // ~5x5..21x21
+ int minDisparity; // minimum disparity (can be negative)
+ int numberOfDisparities; // maximum disparity - minimum disparity (> 0)
+
+ // post-filtering
+ int textureThreshold; // the disparity is only computed for pixels
+ // with textured enough neighborhood
+ int uniquenessRatio; // accept the computed disparity d* only if
+ // SAD(d) >= SAD(d*)*(1 + uniquenessRatio/100.)
+ // for any d != d*+/-1 within the search range.
+ int speckleWindowSize; // disparity variation window
+ int speckleRange; // acceptable range of variation in window
+
+ int trySmallerWindows; // if 1, the results may be more accurate,
+ // at the expense of slower processing
+ CvRect roi1, roi2;
+ int disp12MaxDiff;
+
+ // temporary buffers
+ CvMat* preFilteredImg0;
+ CvMat* preFilteredImg1;
+ CvMat* slidingSumBuf;
+ CvMat* cost;
+ CvMat* disp;
+} CvStereoBMState;
+
+#define CV_STEREO_BM_BASIC 0
+#define CV_STEREO_BM_FISH_EYE 1
+#define CV_STEREO_BM_NARROW 2
+
+CVAPI(CvStereoBMState*) cvCreateStereoBMState(int preset CV_DEFAULT(CV_STEREO_BM_BASIC),
+ int numberOfDisparities CV_DEFAULT(0));
+
+CVAPI(void) cvReleaseStereoBMState( CvStereoBMState** state );
+
+CVAPI(void) cvFindStereoCorrespondenceBM( const CvArr* left, const CvArr* right,
+ CvArr* disparity, CvStereoBMState* state );
+
+CVAPI(CvRect) cvGetValidDisparityROI( CvRect roi1, CvRect roi2, int minDisparity,
+ int numberOfDisparities, int SADWindowSize );
+
+CVAPI(void) cvValidateDisparity( CvArr* disparity, const CvArr* cost,
+ int minDisparity, int numberOfDisparities,
+ int disp12MaxDiff CV_DEFAULT(1) );
+
+/* Reprojects the computed disparity image to the 3D space using the specified 4x4 matrix */
+CVAPI(void) cvReprojectImageTo3D( const CvArr* disparityImage,
+ CvArr* _3dImage, const CvMat* Q,
+ int handleMissingValues CV_DEFAULT(0) );
+
+
+#ifdef __cplusplus
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////
+class CV_EXPORTS CvLevMarq
+{
+public:
+ CvLevMarq();
+ CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+ bool completeSymmFlag=false );
+ ~CvLevMarq();
+ void init( int nparams, int nerrs, CvTermCriteria criteria=
+ cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
+ bool completeSymmFlag=false );
+ bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
+ bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
+
+ void clear();
+ void step();
+ enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
+
+ cv::Ptr<CvMat> mask;
+ cv::Ptr<CvMat> prevParam;
+ cv::Ptr<CvMat> param;
+ cv::Ptr<CvMat> J;
+ cv::Ptr<CvMat> err;
+ cv::Ptr<CvMat> JtJ;
+ cv::Ptr<CvMat> JtJN;
+ cv::Ptr<CvMat> JtErr;
+ cv::Ptr<CvMat> JtJV;
+ cv::Ptr<CvMat> JtJW;
+ double prevErrNorm, errNorm;
+ int lambdaLg10;
+ CvTermCriteria criteria;
+ int state;
+ int iters;
+ bool completeSymmFlag;
+};
+
+namespace cv
+{
+//! converts rotation vector to rotation matrix or vice versa using Rodrigues transformation
+CV_EXPORTS_W void Rodrigues(InputArray src, OutputArray dst, OutputArray jacobian=noArray());
+
+//! type of the robust estimation algorithm
+enum
+{
+ LMEDS=CV_LMEDS, //!< least-median algorithm
+ RANSAC=CV_RANSAC //!< RANSAC algorithm
+};
+
+//! computes the best-fit perspective transformation mapping srcPoints to dstPoints.
+CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
+ int method=0, double ransacReprojThreshold=3,
+ OutputArray mask=noArray());
+
+//! variant of findHomography for backward compatibility
+CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
+ OutputArray mask, int method=0, double ransacReprojThreshold=3);
+
+//! Computes RQ decomposition of 3x3 matrix
+CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
+ OutputArray Qx=noArray(),
+ OutputArray Qy=noArray(),
+ OutputArray Qz=noArray());
+
+//! Decomposes the projection matrix into camera matrix and the rotation martix and the translation vector
+CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
+ OutputArray rotMatrix, OutputArray transVect,
+ OutputArray rotMatrixX=noArray(),
+ OutputArray rotMatrixY=noArray(),
+ OutputArray rotMatrixZ=noArray(),
+ OutputArray eulerAngles=noArray() );
+
+//! computes derivatives of the matrix product w.r.t each of the multiplied matrix coefficients
+CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B,
+ OutputArray dABdA,
+ OutputArray dABdB );
+
+//! composes 2 [R|t] transformations together. Also computes the derivatives of the result w.r.t the arguments
+CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
+ InputArray rvec2, InputArray tvec2,
+ OutputArray rvec3, OutputArray tvec3,
+ OutputArray dr3dr1=noArray(), OutputArray dr3dt1=noArray(),
+ OutputArray dr3dr2=noArray(), OutputArray dr3dt2=noArray(),
+ OutputArray dt3dr1=noArray(), OutputArray dt3dt1=noArray(),
+ OutputArray dt3dr2=noArray(), OutputArray dt3dt2=noArray() );
+
+//! projects points from the model coordinate space to the image coordinates. Also computes derivatives of the image coordinates w.r.t the intrinsic and extrinsic camera parameters
+CV_EXPORTS_W void projectPoints( InputArray objectPoints,
+ InputArray rvec, InputArray tvec,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ OutputArray imagePoints,
+ OutputArray jacobian=noArray(),
+ double aspectRatio=0 );
+
+//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are not handled.
+enum
+{
+ ITERATIVE=CV_ITERATIVE,
+ EPNP=CV_EPNP,
+ P3P=CV_P3P
+};
+CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ OutputArray rvec, OutputArray tvec,
+ bool useExtrinsicGuess=false, int flags=ITERATIVE);
+
+//! computes the camera pose from a few 3D points and the corresponding projections. The outliers are possible.
+CV_EXPORTS_W void solvePnPRansac( InputArray objectPoints,
+ InputArray imagePoints,
+ InputArray cameraMatrix,
+ InputArray distCoeffs,
+ OutputArray rvec,
+ OutputArray tvec,
+ bool useExtrinsicGuess = false,
+ int iterationsCount = 100,
+ float reprojectionError = 8.0,
+ int minInliersCount = 100,
+ OutputArray inliers = noArray(),
+ int flags = ITERATIVE);
+
+//! initializes camera matrix from a few 3D points and the corresponding projections.
+CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints,
+ Size imageSize, double aspectRatio=1. );
+
+enum { CALIB_CB_ADAPTIVE_THRESH = 1, CALIB_CB_NORMALIZE_IMAGE = 2,
+ CALIB_CB_FILTER_QUADS = 4, CALIB_CB_FAST_CHECK = 8 };
+
+//! finds checkerboard pattern of the specified size in the image
+CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize,
+ OutputArray corners,
+ int flags=CALIB_CB_ADAPTIVE_THRESH+CALIB_CB_NORMALIZE_IMAGE );
+
+//! finds subpixel-accurate positions of the chessboard corners
+CV_EXPORTS bool find4QuadCornerSubpix(InputArray img, InputOutputArray corners, Size region_size);
+
+//! draws the checkerboard pattern (found or partly found) in the image
+CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
+ InputArray corners, bool patternWasFound );
+
+enum { CALIB_CB_SYMMETRIC_GRID = 1, CALIB_CB_ASYMMETRIC_GRID = 2,
+ CALIB_CB_CLUSTERING = 4 };
+
+//! finds circles' grid pattern of the specified size in the image
+CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
+ OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID,
+ const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
+
+//! the deprecated function. Use findCirclesGrid() instead of it.
+CV_EXPORTS_W bool findCirclesGridDefault( InputArray image, Size patternSize,
+ OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID );
+enum
+{
+ CALIB_USE_INTRINSIC_GUESS = CV_CALIB_USE_INTRINSIC_GUESS,
+ CALIB_FIX_ASPECT_RATIO = CV_CALIB_FIX_ASPECT_RATIO,
+ CALIB_FIX_PRINCIPAL_POINT = CV_CALIB_FIX_PRINCIPAL_POINT,
+ CALIB_ZERO_TANGENT_DIST = CV_CALIB_ZERO_TANGENT_DIST,
+ CALIB_FIX_FOCAL_LENGTH = CV_CALIB_FIX_FOCAL_LENGTH,
+ CALIB_FIX_K1 = CV_CALIB_FIX_K1,
+ CALIB_FIX_K2 = CV_CALIB_FIX_K2,
+ CALIB_FIX_K3 = CV_CALIB_FIX_K3,
+ CALIB_FIX_K4 = CV_CALIB_FIX_K4,
+ CALIB_FIX_K5 = CV_CALIB_FIX_K5,
+ CALIB_FIX_K6 = CV_CALIB_FIX_K6,
+ CALIB_RATIONAL_MODEL = CV_CALIB_RATIONAL_MODEL,
+ // only for stereo
+ CALIB_FIX_INTRINSIC = CV_CALIB_FIX_INTRINSIC,
+ CALIB_SAME_FOCAL_LENGTH = CV_CALIB_SAME_FOCAL_LENGTH,
+ // for stereo rectification
+ CALIB_ZERO_DISPARITY = CV_CALIB_ZERO_DISPARITY
+};
+
+//! finds intrinsic and extrinsic camera parameters from several fews of a known calibration pattern.
+CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints,
+ Size imageSize,
+ CV_OUT InputOutputArray cameraMatrix,
+ CV_OUT InputOutputArray distCoeffs,
+ OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
+ int flags=0, TermCriteria criteria = TermCriteria(
+ TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON) );
+
+//! computes several useful camera characteristics from the camera matrix, camera frame resolution and the physical sensor size.
+CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix,
+ Size imageSize,
+ double apertureWidth,
+ double apertureHeight,
+ CV_OUT double& fovx,
+ CV_OUT double& fovy,
+ CV_OUT double& focalLength,
+ CV_OUT Point2d& principalPoint,
+ CV_OUT double& aspectRatio );
+
+//! finds intrinsic and extrinsic parameters of a stereo camera
+CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
+ InputArrayOfArrays imagePoints1,
+ InputArrayOfArrays imagePoints2,
+ CV_OUT InputOutputArray cameraMatrix1,
+ CV_OUT InputOutputArray distCoeffs1,
+ CV_OUT InputOutputArray cameraMatrix2,
+ CV_OUT InputOutputArray distCoeffs2,
+ Size imageSize, OutputArray R,
+ OutputArray T, OutputArray E, OutputArray F,
+ TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6),
+ int flags=CALIB_FIX_INTRINSIC );
+
+
+//! computes the rectification transformation for a stereo camera from its intrinsic and extrinsic parameters
+CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
+ InputArray cameraMatrix2, InputArray distCoeffs2,
+ Size imageSize, InputArray R, InputArray T,
+ OutputArray R1, OutputArray R2,
+ OutputArray P1, OutputArray P2,
+ OutputArray Q, int flags=CALIB_ZERO_DISPARITY,
+ double alpha=-1, Size newImageSize=Size(),
+ CV_OUT Rect* validPixROI1=0, CV_OUT Rect* validPixROI2=0 );
+
+//! computes the rectification transformation for an uncalibrated stereo camera (zero distortion is assumed)
+CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
+ InputArray F, Size imgSize,
+ OutputArray H1, OutputArray H2,
+ double threshold=5 );
+
+//! computes the rectification transformations for 3-head camera, where all the heads are on the same line.
+CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
+ InputArray cameraMatrix2, InputArray distCoeffs2,
+ InputArray cameraMatrix3, InputArray distCoeffs3,
+ InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
+ Size imageSize, InputArray R12, InputArray T12,
+ InputArray R13, InputArray T13,
+ OutputArray R1, OutputArray R2, OutputArray R3,
+ OutputArray P1, OutputArray P2, OutputArray P3,
+ OutputArray Q, double alpha, Size newImgSize,
+ CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
+
+//! returns the optimal new camera matrix
+CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
+ Size imageSize, double alpha, Size newImgSize=Size(),
+ CV_OUT Rect* validPixROI=0, bool centerPrincipalPoint=false);
+
+//! converts point coordinates from normal pixel coordinates to homogeneous coordinates ((x,y)->(x,y,1))
+CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
+
+//! converts point coordinates from homogeneous to normal pixel coordinates ((x,y,z)->(x/z, y/z))
+CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
+
+//! for backward compatibility
+CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
+
+//! the algorithm for finding fundamental matrix
+enum
+{
+ FM_7POINT = CV_FM_7POINT, //!< 7-point algorithm
+ FM_8POINT = CV_FM_8POINT, //!< 8-point algorithm
+ FM_LMEDS = CV_FM_LMEDS, //!< least-median algorithm
+ FM_RANSAC = CV_FM_RANSAC //!< RANSAC algorithm
+};
+
+//! finds fundamental matrix from a set of corresponding 2D points
+CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
+ int method=FM_RANSAC,
+ double param1=3., double param2=0.99,
+ OutputArray mask=noArray());
+
+//! variant of findFundamentalMat for backward compatibility
+CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
+ OutputArray mask, int method=FM_RANSAC,
+ double param1=3., double param2=0.99);
+
+//! finds coordinates of epipolar lines corresponding the specified points
+CV_EXPORTS_W void computeCorrespondEpilines( InputArray points,
+ int whichImage, InputArray F,
+ OutputArray lines );
+
+CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
+ InputArray projPoints1, InputArray projPoints2,
+ OutputArray points4D );
+
+CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
+ OutputArray newPoints1, OutputArray newPoints2 );
+
+template<> CV_EXPORTS void Ptr<CvStereoBMState>::delete_obj();
+
+/*!
+ Block Matching Stereo Correspondence Algorithm
+
+ The class implements BM stereo correspondence algorithm by K. Konolige.
+*/
+class CV_EXPORTS_W StereoBM
+{
+public:
+ enum { PREFILTER_NORMALIZED_RESPONSE = 0, PREFILTER_XSOBEL = 1,
+ BASIC_PRESET=0, FISH_EYE_PRESET=1, NARROW_PRESET=2 };
+
+ //! the default constructor
+ CV_WRAP StereoBM();
+ //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size
+ CV_WRAP StereoBM(int preset, int ndisparities=0, int SADWindowSize=21);
+ //! the method that reinitializes the state. The previous content is destroyed
+ void init(int preset, int ndisparities=0, int SADWindowSize=21);
+ //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
+ CV_WRAP_AS(compute) void operator()( InputArray left, InputArray right,
+ OutputArray disparity, int disptype=CV_16S );
+
+ //! pointer to the underlying CvStereoBMState
+ Ptr<CvStereoBMState> state;
+};
+
+
+/*!
+ Semi-Global Block Matching Stereo Correspondence Algorithm
+
+ The class implements the original SGBM stereo correspondence algorithm by H. Hirschmuller and some its modification.
+ */
+class CV_EXPORTS_W StereoSGBM
+{
+public:
+ enum { DISP_SHIFT=4, DISP_SCALE = (1<<DISP_SHIFT) };
+
+ //! the default constructor
+ CV_WRAP StereoSGBM();
+
+ //! the full constructor taking all the necessary algorithm parameters
+ CV_WRAP StereoSGBM(int minDisparity, int numDisparities, int SADWindowSize,
+ int P1=0, int P2=0, int disp12MaxDiff=0,
+ int preFilterCap=0, int uniquenessRatio=0,
+ int speckleWindowSize=0, int speckleRange=0,
+ bool fullDP=false);
+ //! the destructor
+ virtual ~StereoSGBM();
+
+ //! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
+ CV_WRAP_AS(compute) virtual void operator()(InputArray left, InputArray right,
+ OutputArray disp);
+
+ CV_PROP_RW int minDisparity;
+ CV_PROP_RW int numberOfDisparities;
+ CV_PROP_RW int SADWindowSize;
+ CV_PROP_RW int preFilterCap;
+ CV_PROP_RW int uniquenessRatio;
+ CV_PROP_RW int P1;
+ CV_PROP_RW int P2;
+ CV_PROP_RW int speckleWindowSize;
+ CV_PROP_RW int speckleRange;
+ CV_PROP_RW int disp12MaxDiff;
+ CV_PROP_RW bool fullDP;
+
+protected:
+ Mat buffer;
+};
+
+//! filters off speckles (small regions of incorrectly computed disparity)
+CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal, int maxSpeckleSize, double maxDiff,
+ InputOutputArray buf=noArray() );
+
+//! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
+CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
+ int minDisparity, int numberOfDisparities,
+ int SADWindowSize );
+
+//! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
+CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
+ int minDisparity, int numberOfDisparities,
+ int disp12MaxDisp=1 );
+
+//! reprojects disparity image to 3D: (x,y,d)->(X,Y,Z) using the matrix Q returned by cv::stereoRectify
+CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
+ OutputArray _3dImage, InputArray Q,
+ bool handleMissingValues=false,
+ int ddepth=-1 );
+
+CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst,
+ OutputArray out, OutputArray inliers,
+ double ransacThreshold=3, double confidence=0.99);
+
+namespace fisheye
+{
+ enum{
+ CALIB_USE_INTRINSIC_GUESS = 1,
+ CALIB_RECOMPUTE_EXTRINSIC = 2,
+ CALIB_CHECK_COND = 4,
+ CALIB_FIX_SKEW = 8,
+ CALIB_FIX_K1 = 16,
+ CALIB_FIX_K2 = 32,
+ CALIB_FIX_K3 = 64,
+ CALIB_FIX_K4 = 128,
+ CALIB_FIX_INTRINSIC = 256
+ };
+
+ //! projects 3D points using fisheye model
+ CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
+ InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
+
+ //! projects points using fisheye model
+ CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
+ InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
+
+ //! distorts 2D points using fisheye model
+ CV_EXPORTS void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
+
+ //! undistorts 2D points using fisheye model
+ CV_EXPORTS void undistortPoints(InputArray distorted, OutputArray undistorted,
+ InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray());
+
+ //! computing undistortion and rectification maps for image transform by cv::remap()
+ //! If D is empty zero distortion is used, if R or P is empty identity matrixes are used
+ CV_EXPORTS void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
+ const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);
+
+ //! undistorts image, optionally changes resolution and camera matrix. If Knew zero identity matrix is used
+ CV_EXPORTS void undistortImage(InputArray distorted, OutputArray undistorted,
+ InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
+
+ //! estimates new camera matrix for undistortion or rectification
+ CV_EXPORTS void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
+ OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);
+
+ //! performs camera calibaration
+ CV_EXPORTS double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
+ InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
+ TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
+
+ //! stereo rectification estimation
+ CV_EXPORTS void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
+ OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
+ double balance = 0.0, double fov_scale = 1.0);
+
+ //! performs stereo calibaration
+ CV_EXPORTS double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
+ InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
+ OutputArray R, OutputArray T, int flags = CALIB_FIX_INTRINSIC,
+ TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
+
+}
+
+}
+
+#endif
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp
new file mode 100644
index 00000000..d5879424
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/contrib.hpp
@@ -0,0 +1,998 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CONTRIB_HPP__
+#define __OPENCV_CONTRIB_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/objdetect/objdetect.hpp"
+
+#ifdef __cplusplus
+
+/****************************************************************************************\
+* Adaptive Skin Detector *
+\****************************************************************************************/
+
+class CV_EXPORTS CvAdaptiveSkinDetector
+{
+private:
+ enum {
+ GSD_HUE_LT = 3,
+ GSD_HUE_UT = 33,
+ GSD_INTENSITY_LT = 15,
+ GSD_INTENSITY_UT = 250
+ };
+
+ class CV_EXPORTS Histogram
+ {
+ private:
+ enum {
+ HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
+ };
+
+ protected:
+ int findCoverageIndex(double surfaceToCover, int defaultValue = 0);
+
+ public:
+ CvHistogram *fHistogram;
+ Histogram();
+ virtual ~Histogram();
+
+ void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
+ void mergeWith(Histogram *source, double weight);
+ };
+
+ int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
+ double fHistogramMergeFactor, fHuePercentCovered;
+ Histogram histogramHueMotion, skinHueHistogram;
+ IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
+ IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
+
+protected:
+ void initData(IplImage *src, int widthDivider, int heightDivider);
+ void adaptiveFilter();
+
+public:
+
+ enum {
+ MORPHING_METHOD_NONE = 0,
+ MORPHING_METHOD_ERODE = 1,
+ MORPHING_METHOD_ERODE_ERODE = 2,
+ MORPHING_METHOD_ERODE_DILATE = 3
+ };
+
+ CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
+ virtual ~CvAdaptiveSkinDetector();
+
+ virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
+};
+
+
+/****************************************************************************************\
+ * Fuzzy MeanShift Tracker *
+ \****************************************************************************************/
+
+class CV_EXPORTS CvFuzzyPoint {
+public:
+ double x, y, value;
+
+ CvFuzzyPoint(double _x, double _y);
+};
+
+class CV_EXPORTS CvFuzzyCurve {
+private:
+ std::vector<CvFuzzyPoint> points;
+ double value, centre;
+
+ bool between(double x, double x1, double x2);
+
+public:
+ CvFuzzyCurve();
+ ~CvFuzzyCurve();
+
+ void setCentre(double _centre);
+ double getCentre();
+ void clear();
+ void addPoint(double x, double y);
+ double calcValue(double param);
+ double getValue();
+ void setValue(double _value);
+};
+
+class CV_EXPORTS CvFuzzyFunction {
+public:
+ std::vector<CvFuzzyCurve> curves;
+
+ CvFuzzyFunction();
+ ~CvFuzzyFunction();
+ void addCurve(CvFuzzyCurve *curve, double value = 0);
+ void resetValues();
+ double calcValue();
+ CvFuzzyCurve *newCurve();
+};
+
+class CV_EXPORTS CvFuzzyRule {
+private:
+ CvFuzzyCurve *fuzzyInput1, *fuzzyInput2;
+ CvFuzzyCurve *fuzzyOutput;
+public:
+ CvFuzzyRule();
+ ~CvFuzzyRule();
+ void setRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
+ double calcValue(double param1, double param2);
+ CvFuzzyCurve *getOutputCurve();
+};
+
+class CV_EXPORTS CvFuzzyController {
+private:
+ std::vector<CvFuzzyRule*> rules;
+public:
+ CvFuzzyController();
+ ~CvFuzzyController();
+ void addRule(CvFuzzyCurve *c1, CvFuzzyCurve *c2, CvFuzzyCurve *o1);
+ double calcOutput(double param1, double param2);
+};
+
+class CV_EXPORTS CvFuzzyMeanShiftTracker
+{
+private:
+ class FuzzyResizer
+ {
+ private:
+ CvFuzzyFunction iInput, iOutput;
+ CvFuzzyController fuzzyController;
+ public:
+ FuzzyResizer();
+ int calcOutput(double edgeDensity, double density);
+ };
+
+ class SearchWindow
+ {
+ public:
+ FuzzyResizer *fuzzyResizer;
+ int x, y;
+ int width, height, maxWidth, maxHeight, ellipseHeight, ellipseWidth;
+ int ldx, ldy, ldw, ldh, numShifts, numIters;
+ int xGc, yGc;
+ long m00, m01, m10, m11, m02, m20;
+ double ellipseAngle;
+ double density;
+ unsigned int depthLow, depthHigh;
+ int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;
+
+ SearchWindow();
+ ~SearchWindow();
+ void setSize(int _x, int _y, int _width, int _height);
+ void initDepthValues(IplImage *maskImage, IplImage *depthMap);
+ bool shift();
+ void extractInfo(IplImage *maskImage, IplImage *depthMap, bool initDepth);
+ void getResizeAttribsEdgeDensityLinear(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
+ void getResizeAttribsInnerDensity(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
+ void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
+ bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
+ };
+
+public:
+ enum TrackingState
+ {
+ tsNone = 0,
+ tsSearching = 1,
+ tsTracking = 2,
+ tsSetWindow = 3,
+ tsDisabled = 10
+ };
+
+ enum ResizeMethod {
+ rmEdgeDensityLinear = 0,
+ rmEdgeDensityFuzzy = 1,
+ rmInnerDensity = 2
+ };
+
+ enum {
+ MinKernelMass = 1000
+ };
+
+ SearchWindow kernel;
+ int searchMode;
+
+private:
+ enum
+ {
+ MaxMeanShiftIteration = 5,
+ MaxSetSizeIteration = 5
+ };
+
+ void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);
+
+public:
+ CvFuzzyMeanShiftTracker();
+ ~CvFuzzyMeanShiftTracker();
+
+ void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
+};
+
+
+namespace cv
+{
+
+ class CV_EXPORTS Octree
+ {
+ public:
+ struct Node
+ {
+ Node() {}
+ int begin, end;
+ float x_min, x_max, y_min, y_max, z_min, z_max;
+ int maxLevels;
+ bool isLeaf;
+ int children[8];
+ };
+
+ Octree();
+ Octree( const vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
+ virtual ~Octree();
+
+ virtual void buildTree( const vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
+ virtual void getPointsWithinSphere( const Point3f& center, float radius,
+ vector<Point3f>& points ) const;
+ const vector<Node>& getNodes() const { return nodes; }
+ private:
+ int minPoints;
+ vector<Point3f> points;
+ vector<Node> nodes;
+
+ virtual void buildNext(size_t node_ind);
+ };
+
+
+ class CV_EXPORTS Mesh3D
+ {
+ public:
+ struct EmptyMeshException {};
+
+ Mesh3D();
+ Mesh3D(const vector<Point3f>& vtx);
+ ~Mesh3D();
+
+ void buildOctree();
+ void clearOctree();
+ float estimateResolution(float tryRatio = 0.1f);
+ void computeNormals(float normalRadius, int minNeighbors = 20);
+ void computeNormals(const vector<int>& subset, float normalRadius, int minNeighbors = 20);
+
+ void writeAsVrml(const String& file, const vector<Scalar>& colors = vector<Scalar>()) const;
+
+ vector<Point3f> vtx;
+ vector<Point3f> normals;
+ float resolution;
+ Octree octree;
+
+ const static Point3f allzero;
+ };
+
+ class CV_EXPORTS SpinImageModel
+ {
+ public:
+
+ /* model parameters, leave unset for default or auto estimate */
+ float normalRadius;
+ int minNeighbors;
+
+ float binSize;
+ int imageWidth;
+
+ float lambda;
+ float gamma;
+
+ float T_GeometriccConsistency;
+ float T_GroupingCorespondances;
+
+ /* public interface */
+ SpinImageModel();
+ explicit SpinImageModel(const Mesh3D& mesh);
+ ~SpinImageModel();
+
+ void setLogger(std::ostream* log);
+ void selectRandomSubset(float ratio);
+ void setSubset(const vector<int>& subset);
+ void compute();
+
+ void match(const SpinImageModel& scene, vector< vector<Vec2i> >& result);
+
+ Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const;
+
+ size_t getSpinCount() const { return spinImages.rows; }
+ Mat getSpinImage(size_t index) const { return spinImages.row((int)index); }
+ const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; }
+ const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; }
+
+ const Mesh3D& getMesh() const { return mesh; }
+ Mesh3D& getMesh() { return mesh; }
+
+ /* static utility functions */
+ static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result);
+
+ static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal);
+
+ static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
+ const Point3f& pointModel1, const Point3f& normalModel1,
+ const Point3f& pointScene2, const Point3f& normalScene2,
+ const Point3f& pointModel2, const Point3f& normalModel2);
+
+ static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
+ const Point3f& pointModel1, const Point3f& normalModel1,
+ const Point3f& pointScene2, const Point3f& normalScene2,
+ const Point3f& pointModel2, const Point3f& normalModel2,
+ float gamma);
+ protected:
+ void defaultParams();
+
+ void matchSpinToModel(const Mat& spin, vector<int>& indeces,
+ vector<float>& corrCoeffs, bool useExtremeOutliers = true) const;
+
+ void repackSpinImages(const vector<uchar>& mask, Mat& spinImages, bool reAlloc = true) const;
+
+ vector<int> subset;
+ Mesh3D mesh;
+ Mat spinImages;
+ std::ostream* out;
+ };
+
+ class CV_EXPORTS TickMeter
+ {
+ public:
+ TickMeter();
+ void start();
+ void stop();
+
+ int64 getTimeTicks() const;
+ double getTimeMicro() const;
+ double getTimeMilli() const;
+ double getTimeSec() const;
+ int64 getCounter() const;
+
+ void reset();
+ private:
+ int64 counter;
+ int64 sumTime;
+ int64 startTime;
+ };
+
+ CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm);
+
+ class CV_EXPORTS SelfSimDescriptor
+ {
+ public:
+ SelfSimDescriptor();
+ SelfSimDescriptor(int _ssize, int _lsize,
+ int _startDistanceBucket=DEFAULT_START_DISTANCE_BUCKET,
+ int _numberOfDistanceBuckets=DEFAULT_NUM_DISTANCE_BUCKETS,
+ int _nangles=DEFAULT_NUM_ANGLES);
+ SelfSimDescriptor(const SelfSimDescriptor& ss);
+ virtual ~SelfSimDescriptor();
+ SelfSimDescriptor& operator = (const SelfSimDescriptor& ss);
+
+ size_t getDescriptorSize() const;
+ Size getGridSize( Size imgsize, Size winStride ) const;
+
+ virtual void compute(const Mat& img, vector<float>& descriptors, Size winStride=Size(),
+ const vector<Point>& locations=vector<Point>()) const;
+ virtual void computeLogPolarMapping(Mat& mappingMask) const;
+ virtual void SSD(const Mat& img, Point pt, Mat& ssd) const;
+
+ int smallSize;
+ int largeSize;
+ int startDistanceBucket;
+ int numberOfDistanceBuckets;
+ int numberOfAngles;
+
+ enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41,
+ DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3,
+ DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
+ };
+
+
+ typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);
+
+ class CV_EXPORTS LevMarqSparse {
+ public:
+ LevMarqSparse();
+ LevMarqSparse(int npoints, // number of points
+ int ncameras, // number of cameras
+ int nPointParams, // number of params per one point (3 in case of 3D points)
+ int nCameraParams, // number of parameters per one camera
+ int nErrParams, // number of parameters in measurement vector
+ // for 1 point at one camera (2 in case of 2D projections)
+ Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
+ // 1 - point is visible for the camera, 0 - invisible
+ Mat& P0, // starting vector of parameters, first cameras then points
+ Mat& X, // measurements, in order of visibility. non visible cases are skipped
+ TermCriteria criteria, // termination criteria
+
+ // callback for estimation of Jacobian matrices
+ void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
+ Mat& cam_params, Mat& A, Mat& B, void* data),
+ // callback for estimation of backprojection errors
+ void (CV_CDECL * func)(int i, int j, Mat& point_params,
+ Mat& cam_params, Mat& estim, void* data),
+ void* data, // user-specific data passed to the callbacks
+ BundleAdjustCallback cb, void* user_data
+ );
+
+ virtual ~LevMarqSparse();
+
+ virtual void run( int npoints, // number of points
+ int ncameras, // number of cameras
+ int nPointParams, // number of params per one point (3 in case of 3D points)
+ int nCameraParams, // number of parameters per one camera
+ int nErrParams, // number of parameters in measurement vector
+ // for 1 point at one camera (2 in case of 2D projections)
+ Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
+ // 1 - point is visible for the camera, 0 - invisible
+ Mat& P0, // starting vector of parameters, first cameras then points
+ Mat& X, // measurements, in order of visibility. non visible cases are skipped
+ TermCriteria criteria, // termination criteria
+
+ // callback for estimation of Jacobian matrices
+ void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
+ Mat& cam_params, Mat& A, Mat& B, void* data),
+ // callback for estimation of backprojection errors
+ void (CV_CDECL * func)(int i, int j, Mat& point_params,
+ Mat& cam_params, Mat& estim, void* data),
+ void* data // user-specific data passed to the callbacks
+ );
+
+ virtual void clear();
+
+ // useful function to do simple bundle adjustment tasks
+ static void bundleAdjust(vector<Point3d>& points, // positions of points in global coordinate system (input and output)
+ const vector<vector<Point2d> >& imagePoints, // projections of 3d points for every camera
+ const vector<vector<int> >& visibility, // visibility of 3d points for every camera
+ vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
+ vector<Mat>& R, // rotation matrices of all cameras (input and output)
+ vector<Mat>& T, // translation vector of all cameras (input and output)
+ vector<Mat>& distCoeffs, // distortion coefficients of all cameras (input and output)
+ const TermCriteria& criteria=
+ TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
+ BundleAdjustCallback cb = 0, void* user_data = 0);
+
+ public:
+ virtual void optimize(CvMat &_vis); //main function that runs minimization
+
+ //iteratively asks for measurement for visible camera-point pairs
+ void ask_for_proj(CvMat &_vis,bool once=false);
+ //iteratively asks for Jacobians for every camera_point pair
+ void ask_for_projac(CvMat &_vis);
+
+ CvMat* err; //error X-hX
+ double prevErrNorm, errNorm;
+ double lambda;
+ CvTermCriteria criteria;
+ int iters;
+
+ CvMat** U; //size of array is equal to number of cameras
+ CvMat** V; //size of array is equal to number of points
+ CvMat** inv_V_star; //inverse of V*
+
+ CvMat** A;
+ CvMat** B;
+ CvMat** W;
+
+ CvMat* X; //measurement
 + CvMat* hX; //current measurement estimation given new parameter vector
+
+ CvMat* prevP; //current already accepted parameter.
+ CvMat* P; // parameters used to evaluate function with new params
 + // these parameters may be rejected
+
+ CvMat* deltaP; //computed increase of parameters (result of normal system solution )
+
+ CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation
+ // length of array is j = number of cameras
+ CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation
+ // length of array is i = number of points
+
+ CvMat** Yj; //length of array is i = num_points
+
+ CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params
+
+ CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation
+
+ CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j
+
+ int num_cams;
+ int num_points;
+ int num_err_param;
+ int num_cam_param;
+ int num_point_param;
+
+ //target function and jacobian pointers, which needs to be initialized
+ void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
+ void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);
+
+ void* data;
+
+ BundleAdjustCallback cb;
+ void* user_data;
+ };
+
+ CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ,
+ CV_OUT vector<vector<Point> >& results, CV_OUT vector<float>& cost,
+ double templScale=1, int maxMatches = 20,
+ double minMatchDistance = 1.0, int padX = 3,
+ int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
+ double orientationWeight = 0.5, double truncate = 20);
+
+
+ class CV_EXPORTS_W StereoVar
+ {
+ public:
+ // Flags
+ enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16};
+ enum {CYCLE_O, CYCLE_V};
+ enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};
+
+ //! the default constructor
+ CV_WRAP StereoVar();
+
+ //! the full constructor taking all the necessary algorithm parameters
+ CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);
+
+ //! the destructor
+ virtual ~StereoVar();
+
+ //! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
+ CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, CV_OUT Mat& disp);
+
+ CV_PROP_RW int levels;
+ CV_PROP_RW double pyrScale;
+ CV_PROP_RW int nIt;
+ CV_PROP_RW int minDisp;
+ CV_PROP_RW int maxDisp;
+ CV_PROP_RW int poly_n;
+ CV_PROP_RW double poly_sigma;
+ CV_PROP_RW float fi;
+ CV_PROP_RW float lambda;
+ CV_PROP_RW int penalization;
+ CV_PROP_RW int cycle;
+ CV_PROP_RW int flags;
+
+ private:
+ void autoParams();
+ void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
+ void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
+ void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
+ };
+
+ CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);
+
+ class CV_EXPORTS Directory
+ {
+ public:
+ static std::vector<std::string> GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true );
+ static std::vector<std::string> GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true );
+ static std::vector<std::string> GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true );
+ };
+
+ /*
+ * Generation of a set of different colors by the following way:
 + * 1) generate more colors than needed ("factor" times more) in RGB,
+ * 2) convert them to Lab,
 + * 3) choose the needed number of colors from the set that are most different from
+ * each other,
+ * 4) convert the colors back to RGB
+ */
+ CV_EXPORTS void generateColors( std::vector<Scalar>& colors, size_t count, size_t factor=100 );
+
+
+ /*
+ * Estimate the rigid body motion from frame0 to frame1. The method is based on the paper
+ * "Real-Time Visual Odometry from Dense RGB-D Images", F. Steinbucker, J. Strum, D. Cremers, ICCV, 2011.
+ */
+ enum { ROTATION = 1,
+ TRANSLATION = 2,
+ RIGID_BODY_MOTION = 4
+ };
+ CV_EXPORTS bool RGBDOdometry( Mat& Rt, const Mat& initRt,
+ const Mat& image0, const Mat& depth0, const Mat& mask0,
+ const Mat& image1, const Mat& depth1, const Mat& mask1,
+ const Mat& cameraMatrix, float minDepth=0.f, float maxDepth=4.f, float maxDepthDiff=0.07f,
+ const std::vector<int>& iterCounts=std::vector<int>(),
+ const std::vector<float>& minGradientMagnitudes=std::vector<float>(),
+ int transformType=RIGID_BODY_MOTION );
+
+ /**
+ *Bilinear interpolation technique.
+ *
+ *The value of a desired cortical pixel is obtained through a bilinear interpolation of the values
+ *of the four nearest neighbouring Cartesian pixels to the center of the RF.
+ *The same principle is applied to the inverse transformation.
+ *
+ *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
+ */
+ class CV_EXPORTS LogPolar_Interp
+ {
+ public:
+
+ LogPolar_Interp() {}
+
+ /**
+ *Constructor
+ *\param w the width of the input image
+ *\param h the height of the input image
+ *\param center the transformation center: where the output precision is maximal
+ *\param R the number of rings of the cortical image (default value 70 pixel)
+ *\param ro0 the radius of the blind spot (default value 3 pixel)
+ *\param interp interpolation algorithm
+ *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
+ * \a 0 means that the retinal image is computed within the inscribed circle.
+ *\param S the number of sectors of the cortical image (default value 70 pixel).
 + * Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
+ *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
+ * \a 0 means that the parameter \a S is provided by the user.
+ */
+ LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0,
+ int interp=INTER_LINEAR, int full=1, int S=117, int sp=1);
+ /**
+ *Transformation from Cartesian image to cortical (log-polar) image.
+ *\param source the Cartesian image
+ *\return the transformed image (cortical image)
+ */
+ const Mat to_cortical(const Mat &source);
+ /**
+ *Transformation from cortical image to retinal (inverse log-polar) image.
+ *\param source the cortical image
+ *\return the transformed image (retinal image)
+ */
+ const Mat to_cartesian(const Mat &source);
+ /**
+ *Destructor
+ */
+ ~LogPolar_Interp();
+
+ protected:
+
+ Mat Rsri;
+ Mat Csri;
+
+ int S, R, M, N;
+ int top, bottom,left,right;
+ double ro0, romax, a, q;
+ int interp;
+
+ Mat ETAyx;
+ Mat CSIyx;
+
+ void create_map(int M, int N, int R, int S, double ro0);
+ };
+
+ /**
+ *Overlapping circular receptive fields technique
+ *
+ *The Cartesian plane is divided in two regions: the fovea and the periphery.
+ *The fovea (oversampling) is handled by using the bilinear interpolation technique described above, whereas in
+ *the periphery we use the overlapping Gaussian circular RFs.
+ *
+ *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
+ */
+ class CV_EXPORTS LogPolar_Overlapping
+ {
+ public:
+ LogPolar_Overlapping() {}
+
+ /**
+ *Constructor
+ *\param w the width of the input image
+ *\param h the height of the input image
+ *\param center the transformation center: where the output precision is maximal
+ *\param R the number of rings of the cortical image (default value 70 pixel)
+ *\param ro0 the radius of the blind spot (default value 3 pixel)
+ *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
+ * \a 0 means that the retinal image is computed within the inscribed circle.
+ *\param S the number of sectors of the cortical image (default value 70 pixel).
 + * Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
+ *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
+ * \a 0 means that the parameter \a S is provided by the user.
+ */
+ LogPolar_Overlapping(int w, int h, Point2i center, int R=70,
+ double ro0=3.0, int full=1, int S=117, int sp=1);
+ /**
+ *Transformation from Cartesian image to cortical (log-polar) image.
+ *\param source the Cartesian image
+ *\return the transformed image (cortical image)
+ */
+ const Mat to_cortical(const Mat &source);
+ /**
+ *Transformation from cortical image to retinal (inverse log-polar) image.
+ *\param source the cortical image
+ *\return the transformed image (retinal image)
+ */
+ const Mat to_cartesian(const Mat &source);
+ /**
+ *Destructor
+ */
+ ~LogPolar_Overlapping();
+
+ protected:
+
+ Mat Rsri;
+ Mat Csri;
+ vector<int> Rsr;
+ vector<int> Csr;
+ vector<double> Wsr;
+
+ int S, R, M, N, ind1;
+ int top, bottom,left,right;
+ double ro0, romax, a, q;
+
+ struct kernel
+ {
+ kernel() { w = 0; }
+ vector<double> weights;
+ int w;
+ };
+
+ Mat ETAyx;
+ Mat CSIyx;
+ vector<kernel> w_ker_2D;
+
+ void create_map(int M, int N, int R, int S, double ro0);
+ };
+
+ /**
+ * Adjacent receptive fields technique
+ *
+ *All the Cartesian pixels, whose coordinates in the cortical domain share the same integer part, are assigned to the same RF.
+ *The precision of the boundaries of the RF can be improved by breaking each pixel into subpixels and assigning each of them to the correct RF.
+ *This technique is implemented from: Traver, V., Pla, F.: Log-polar mapping template design: From task-level requirements
+ *to geometry parameters. Image Vision Comput. 26(10) (2008) 1354-1370
+ *
+ *More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
+ */
+ class CV_EXPORTS LogPolar_Adjacent
+ {
+ public:
+ LogPolar_Adjacent() {}
+
+ /**
+ *Constructor
+ *\param w the width of the input image
+ *\param h the height of the input image
+ *\param center the transformation center: where the output precision is maximal
+ *\param R the number of rings of the cortical image (default value 70 pixel)
+ *\param ro0 the radius of the blind spot (default value 3 pixel)
+ *\param smin the size of the subpixel (default value 0.25 pixel)
+ *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
+ * \a 0 means that the retinal image is computed within the inscribed circle.
+ *\param S the number of sectors of the cortical image (default value 70 pixel).
 + * Its value is usually internally computed to obtain a pixel aspect ratio equal to 1.
+ *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
+ * \a 0 means that the parameter \a S is provided by the user.
+ */
+ LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1);
+ /**
+ *Transformation from Cartesian image to cortical (log-polar) image.
+ *\param source the Cartesian image
+ *\return the transformed image (cortical image)
+ */
+ const Mat to_cortical(const Mat &source);
+ /**
+ *Transformation from cortical image to retinal (inverse log-polar) image.
+ *\param source the cortical image
+ *\return the transformed image (retinal image)
+ */
+ const Mat to_cartesian(const Mat &source);
+ /**
+ *Destructor
+ */
+ ~LogPolar_Adjacent();
+
+ protected:
+ struct pixel
+ {
+ pixel() { u = v = 0; a = 0.; }
+ int u;
+ int v;
+ double a;
+ };
+ int S, R, M, N;
+ int top, bottom,left,right;
+ double ro0, romax, a, q;
+ vector<vector<pixel> > L;
+ vector<double> A;
+
+ void subdivide_recursively(double x, double y, int i, int j, double length, double smin);
+ bool get_uv(double x, double y, int&u, int&v);
+ void create_map(int M, int N, int R, int S, double ro0, double smin);
+ };
+
+ CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
+ CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
+
+ class CV_EXPORTS LDA
+ {
+ public:
+ // Initializes a LDA with num_components (default 0).
+ LDA(int num_components = 0) :
+ _num_components(num_components) {};
+
+ // Initializes and performs a Discriminant Analysis with Fisher's
+ // Optimization Criterion on given data in src and corresponding labels
+ // in labels. If 0 (or less) number of components are given, they are
+ // automatically determined for given data in computation.
+ LDA(const Mat& src, vector<int> labels,
+ int num_components = 0) :
+ _num_components(num_components)
+ {
+ this->compute(src, labels); //! compute eigenvectors and eigenvalues
+ }
+
+ // Initializes and performs a Discriminant Analysis with Fisher's
+ // Optimization Criterion on given data in src and corresponding labels
+ // in labels. If 0 (or less) number of components are given, they are
+ // automatically determined for given data in computation.
+ LDA(InputArrayOfArrays src, InputArray labels,
+ int num_components = 0) :
+ _num_components(num_components)
+ {
+ this->compute(src, labels); //! compute eigenvectors and eigenvalues
+ }
+
+ // Serializes this object to a given filename.
+ void save(const string& filename) const;
+
+ // Deserializes this object from a given filename.
+ void load(const string& filename);
+
+ // Serializes this object to a given cv::FileStorage.
+ void save(FileStorage& fs) const;
+
+ // Deserializes this object from a given cv::FileStorage.
+ void load(const FileStorage& node);
+
+ // Destructor.
+ ~LDA() {}
+
+ /** Compute the discriminants for data in src (row aligned) and labels.
+ */
+ void compute(InputArrayOfArrays src, InputArray labels);
+
+ /** Projects samples into the LDA subspace.
+ src may be one or more row aligned samples.
+ */
+ Mat project(InputArray src);
+
+ /** Reconstructs projections from the LDA subspace.
+ src may be one or more row aligned projections.
+ */
+ Mat reconstruct(InputArray src);
+
+ // Returns the eigenvectors of this LDA.
+ Mat eigenvectors() const { return _eigenvectors; };
+
+ // Returns the eigenvalues of this LDA.
+ Mat eigenvalues() const { return _eigenvalues; }
+
+ protected:
+ bool _dataAsRow; // unused, but needed for ABI compatibility.
+ int _num_components;
+ Mat _eigenvectors;
+ Mat _eigenvalues;
+
+ void lda(InputArrayOfArrays src, InputArray labels);
+ };
+
+ class CV_EXPORTS_W FaceRecognizer : public Algorithm
+ {
+ public:
+ //! virtual destructor
+ virtual ~FaceRecognizer() {}
+
+ // Trains a FaceRecognizer.
+ CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;
+
+ // Updates a FaceRecognizer.
+ CV_WRAP void update(InputArrayOfArrays src, InputArray labels);
+
+ // Gets a prediction from a FaceRecognizer.
+ virtual int predict(InputArray src) const = 0;
+
+ // Predicts the label and confidence for a given sample.
+ CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0;
+
+ // Serializes this object to a given filename.
+ CV_WRAP virtual void save(const string& filename) const;
+
+ // Deserializes this object from a given filename.
+ CV_WRAP virtual void load(const string& filename);
+
+ // Serializes this object to a given cv::FileStorage.
+ virtual void save(FileStorage& fs) const = 0;
+
+ // Deserializes this object from a given cv::FileStorage.
+ virtual void load(const FileStorage& fs) = 0;
+
+ // Sets additional information as pairs label - info.
+ void setLabelsInfo(const std::map<int, string>& labelsInfo);
+
+ // Gets string information by label
+ string getLabelInfo(const int &label);
+
+ // Gets labels by string
+ vector<int> getLabelsByString(const string& str);
+ };
+
+ CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
+ CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
+ CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
+ int grid_x=8, int grid_y=8, double threshold = DBL_MAX);
+
+ enum
+ {
+ COLORMAP_AUTUMN = 0,
+ COLORMAP_BONE = 1,
+ COLORMAP_JET = 2,
+ COLORMAP_WINTER = 3,
+ COLORMAP_RAINBOW = 4,
+ COLORMAP_OCEAN = 5,
+ COLORMAP_SUMMER = 6,
+ COLORMAP_SPRING = 7,
+ COLORMAP_COOL = 8,
+ COLORMAP_HSV = 9,
+ COLORMAP_PINK = 10,
+ COLORMAP_HOT = 11
+ };
+
+ CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
+
+ CV_EXPORTS bool initModule_contrib();
+}
+
+#include "opencv2/contrib/retina.hpp"
+
+#include "opencv2/contrib/openfabmap.hpp"
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp
new file mode 100644
index 00000000..56aa1ccb
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/detection_based_tracker.hpp
@@ -0,0 +1,106 @@
+#pragma once
+
+#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/objdetect/objdetect.hpp>
+
+#include <vector>
+
+class DetectionBasedTracker
+{
+ public:
+ struct Parameters
+ {
+ int minObjectSize;
+ int maxObjectSize;
+ double scaleFactor;
+ int maxTrackLifetime;
+ int minNeighbors;
 + int minDetectionPeriod; //the minimal time between runs of the big object detector (on the whole frame) in ms (1000 means 1 sec), default=0
+
+ Parameters();
+ };
+
+ DetectionBasedTracker(const std::string& cascadeFilename, const Parameters& params);
+ virtual ~DetectionBasedTracker();
+
+ virtual bool run();
+ virtual void stop();
+ virtual void resetTracking();
+
+ virtual void process(const cv::Mat& imageGray);
+
+ bool setParameters(const Parameters& params);
+ const Parameters& getParameters();
+
+
+ typedef std::pair<cv::Rect, int> Object;
+ virtual void getObjects(std::vector<cv::Rect>& result) const;
+ virtual void getObjects(std::vector<Object>& result) const;
+
+ protected:
+ class SeparateDetectionWork;
+ cv::Ptr<SeparateDetectionWork> separateDetectionWork;
+ friend void* workcycleObjectDetectorFunction(void* p);
+
+
+ struct InnerParameters
+ {
+ int numLastPositionsToTrack;
+ int numStepsToWaitBeforeFirstShow;
+ int numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown;
+ int numStepsToShowWithoutDetecting;
+
+ float coeffTrackingWindowSize;
+ float coeffObjectSizeToTrack;
+ float coeffObjectSpeedUsingInPrediction;
+
+ InnerParameters();
+ };
+ Parameters parameters;
+ InnerParameters innerParameters;
+
+ struct TrackedObject
+ {
+ typedef std::vector<cv::Rect> PositionsVector;
+
+ PositionsVector lastPositions;
+
+ int numDetectedFrames;
+ int numFramesNotDetected;
+ int id;
+
+ TrackedObject(const cv::Rect& rect):numDetectedFrames(1), numFramesNotDetected(0)
+ {
+ lastPositions.push_back(rect);
+ id=getNextId();
+ };
+
+ static int getNextId()
+ {
+ static int _id=0;
+ return _id++;
+ }
+ };
+
+ int numTrackedSteps;
+ std::vector<TrackedObject> trackedObjects;
+
+ std::vector<float> weightsPositionsSmoothing;
+ std::vector<float> weightsSizesSmoothing;
+
+ cv::CascadeClassifier cascadeForTracking;
+
+
+ void updateTrackedObjects(const std::vector<cv::Rect>& detectedObjects);
+ cv::Rect calcTrackedObjectPositionToShow(int i) const;
+ void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector<cv::Rect>& detectedObjectsInRegions);
+};
+
+namespace cv
+{
+ using ::DetectionBasedTracker;
+} //end of cv namespace
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp
new file mode 100644
index 00000000..3a1f722d
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/hybridtracker.hpp
@@ -0,0 +1,220 @@
+//*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_HYBRIDTRACKER_H_
+#define __OPENCV_HYBRIDTRACKER_H_
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/operations.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/ml/ml.hpp"
+
+#ifdef __cplusplus
+
+namespace cv
+{
+
+// Motion model for tracking algorithm. Currently supports objects that do not move much.
+// To add Kalman filter
+struct CV_EXPORTS CvMotionModel
+{
+ enum {LOW_PASS_FILTER = 0, KALMAN_FILTER = 1, EM = 2};
+
+ CvMotionModel()
+ {
+ }
+
+ float low_pass_gain; // low pass gain
+};
+
+// Mean Shift Tracker parameters for specifying use of HSV channel and CamShift parameters.
+struct CV_EXPORTS CvMeanShiftTrackerParams
+{
+ enum { H = 0, HS = 1, HSV = 2 };
+ CvMeanShiftTrackerParams(int tracking_type = CvMeanShiftTrackerParams::HS,
+ CvTermCriteria term_crit = CvTermCriteria());
+
+ int tracking_type;
+ vector<float> h_range;
+ vector<float> s_range;
+ vector<float> v_range;
+ CvTermCriteria term_crit;
+};
+
+// Feature tracking parameters
+struct CV_EXPORTS CvFeatureTrackerParams
+{
+ enum { SIFT = 0, SURF = 1, OPTICAL_FLOW = 2 };
+ CvFeatureTrackerParams(int featureType = 0, int windowSize = 0)
+ {
+ feature_type = featureType;
+ window_size = windowSize;
+ }
+
+ int feature_type; // Feature type to use
+ int window_size; // Window size in pixels around which to search for new window
+};
+
+// Hybrid Tracking parameters for specifying weights of individual trackers and motion model.
+struct CV_EXPORTS CvHybridTrackerParams
+{
+ CvHybridTrackerParams(float ft_tracker_weight = 0.5, float ms_tracker_weight = 0.5,
+ CvFeatureTrackerParams ft_params = CvFeatureTrackerParams(),
+ CvMeanShiftTrackerParams ms_params = CvMeanShiftTrackerParams(),
+ CvMotionModel model = CvMotionModel());
+
+ float ft_tracker_weight;
+ float ms_tracker_weight;
+ CvFeatureTrackerParams ft_params;
+ CvMeanShiftTrackerParams ms_params;
+ int motion_model;
+ float low_pass_gain;
+};
+
+// Performs Camshift using parameters from MeanShiftTrackerParams
+class CV_EXPORTS CvMeanShiftTracker
+{
+private:
+ Mat hsv, hue;
+ Mat backproj;
+ Mat mask, maskroi;
+ MatND hist;
+ Rect prev_trackwindow;
+ RotatedRect prev_trackbox;
+ Point2f prev_center;
+
+public:
+ CvMeanShiftTrackerParams params;
+
+ CvMeanShiftTracker();
+ explicit CvMeanShiftTracker(CvMeanShiftTrackerParams _params);
+ ~CvMeanShiftTracker();
+ void newTrackingWindow(Mat image, Rect selection);
+ RotatedRect updateTrackingWindow(Mat image);
+ Mat getHistogramProjection(int type);
+ void setTrackingWindow(Rect _window);
+ Rect getTrackingWindow();
+ RotatedRect getTrackingEllipse();
+ Point2f getTrackingCenter();
+};
+
+// Performs SIFT/SURF feature tracking using parameters from FeatureTrackerParams
+class CV_EXPORTS CvFeatureTracker
+{
+private:
+ Ptr<Feature2D> dd;
+ Ptr<DescriptorMatcher> matcher;
+ vector<DMatch> matches;
+
+ Mat prev_image;
+ Mat prev_image_bw;
+ Rect prev_trackwindow;
+ Point2d prev_center;
+
+ int ittr;
+ vector<Point2f> features[2];
+
+public:
+ Mat disp_matches;
+ CvFeatureTrackerParams params;
+
+ CvFeatureTracker();
+ explicit CvFeatureTracker(CvFeatureTrackerParams params);
+ ~CvFeatureTracker();
+ void newTrackingWindow(Mat image, Rect selection);
+ Rect updateTrackingWindow(Mat image);
+ Rect updateTrackingWindowWithSIFT(Mat image);
+ Rect updateTrackingWindowWithFlow(Mat image);
+ void setTrackingWindow(Rect _window);
+ Rect getTrackingWindow();
+ Point2f getTrackingCenter();
+};
+
+// Performs Hybrid Tracking and combines individual trackers using EM or filters
+class CV_EXPORTS CvHybridTracker
+{
+private:
+ CvMeanShiftTracker* mstracker;
+ CvFeatureTracker* fttracker;
+
+ CvMat* samples;
+ CvMat* labels;
+
+ Rect prev_window;
+ Point2f prev_center;
+ Mat prev_proj;
+ RotatedRect trackbox;
+
+ int ittr;
+ Point2f curr_center;
+
+ inline float getL2Norm(Point2f p1, Point2f p2);
+ Mat getDistanceProjection(Mat image, Point2f center);
+ Mat getGaussianProjection(Mat image, int ksize, double sigma, Point2f center);
+ void updateTrackerWithEM(Mat image);
+ void updateTrackerWithLowPassFilter(Mat image);
+
+public:
+ CvHybridTrackerParams params;
+ CvHybridTracker();
+ explicit CvHybridTracker(CvHybridTrackerParams params);
+ ~CvHybridTracker();
+
+ void newTracker(Mat image, Rect selection);
+ void updateTracker(Mat image);
+ Rect getTrackingWindow();
+};
+
+typedef CvMotionModel MotionModel;
+typedef CvMeanShiftTrackerParams MeanShiftTrackerParams;
+typedef CvFeatureTrackerParams FeatureTrackerParams;
+typedef CvHybridTrackerParams HybridTrackerParams;
+typedef CvMeanShiftTracker MeanShiftTracker;
+typedef CvFeatureTracker FeatureTracker;
+typedef CvHybridTracker HybridTracker;
+}
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp
new file mode 100644
index 00000000..6b2834ed
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/openfabmap.hpp
@@ -0,0 +1,405 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+// This file originates from the openFABMAP project:
+// [http://code.google.com/p/openfabmap/]
+//
+// For published work which uses all or part of OpenFABMAP, please cite:
+// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843]
+//
+// Original Algorithm by Mark Cummins and Paul Newman:
+// [http://ijr.sagepub.com/content/27/6/647.short]
+// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942]
+// [http://ijr.sagepub.com/content/30/9/1100.abstract]
+//
+// License Agreement
+//
+// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and
+// Will Maddern [w.maddern@qut.edu.au], all rights reserved.
+//
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OPENFABMAP_H_
+#define __OPENCV_OPENFABMAP_H_
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/features2d/features2d.hpp"
+
+#include <vector>
+#include <list>
+#include <map>
+#include <set>
+#include <valarray>
+
+namespace cv {
+
+namespace of2 {
+
+using std::list;
+using std::map;
+using std::multiset;
+
+/*
+ Return data format of a FABMAP compare call
+*/
+struct CV_EXPORTS IMatch {
+
+ IMatch() :
+ queryIdx(-1), imgIdx(-1), likelihood(-DBL_MAX), match(-DBL_MAX) {
+ }
+ IMatch(int _queryIdx, int _imgIdx, double _likelihood, double _match) :
+ queryIdx(_queryIdx), imgIdx(_imgIdx), likelihood(_likelihood), match(
+ _match) {
+ }
+
+ int queryIdx; //query index
+ int imgIdx; //test index
+
+ double likelihood; //raw loglikelihood
+ double match; //normalised probability
+
+ bool operator<(const IMatch& m) const {
+ return match < m.match;
+ }
+
+};
+
+/*
+ Base FabMap class. Each FabMap method inherits from this class.
+*/
+class CV_EXPORTS FabMap {
+public:
+
+ //FabMap options
+ enum {
+ MEAN_FIELD = 1,
+ SAMPLED = 2,
+ NAIVE_BAYES = 4,
+ CHOW_LIU = 8,
+ MOTION_MODEL = 16
+ };
+
+ FabMap(const Mat& clTree, double PzGe, double PzGNe, int flags,
+ int numSamples = 0);
+ virtual ~FabMap();
+
+ //methods to add training data for sampling method
+ virtual void addTraining(const Mat& queryImgDescriptor);
+ virtual void addTraining(const vector<Mat>& queryImgDescriptors);
+
+ //methods to add to the test data
+ virtual void add(const Mat& queryImgDescriptor);
+ virtual void add(const vector<Mat>& queryImgDescriptors);
+
+ //accessors
+ const vector<Mat>& getTrainingImgDescriptors() const;
+ const vector<Mat>& getTestImgDescriptors() const;
+
+ //Main FabMap image comparison
+ void compare(const Mat& queryImgDescriptor,
+ vector<IMatch>& matches, bool addQuery = false,
+ const Mat& mask = Mat());
+ void compare(const Mat& queryImgDescriptor,
+ const Mat& testImgDescriptors, vector<IMatch>& matches,
+ const Mat& mask = Mat());
+ void compare(const Mat& queryImgDescriptor,
+ const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches, const Mat& mask = Mat());
+ void compare(const vector<Mat>& queryImgDescriptors, vector<
+ IMatch>& matches, bool addQuery = false, const Mat& mask =
+ Mat());
+ void compare(const vector<Mat>& queryImgDescriptors,
+ const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches, const Mat& mask = Mat());
+
+protected:
+
+ void compareImgDescriptor(const Mat& queryImgDescriptor,
+ int queryIndex, const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches);
+
+ void addImgDescriptor(const Mat& queryImgDescriptor);
+
+ //the getLikelihoods method is overwritten for each different FabMap
+ //method.
+ virtual void getLikelihoods(const Mat& queryImgDescriptor,
+ const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches);
+ virtual double getNewPlaceLikelihood(const Mat& queryImgDescriptor);
+
+ //turn likelihoods into probabilities (also add in motion model if used)
+ void normaliseDistribution(vector<IMatch>& matches);
+
+ //Chow-Liu Tree
+ int pq(int q);
+ double Pzq(int q, bool zq);
+ double PzqGzpq(int q, bool zq, bool zpq);
+
+ //FAB-MAP Core
+ double PzqGeq(bool zq, bool eq);
+ double PeqGL(int q, bool Lzq, bool eq);
+ double PzqGL(int q, bool zq, bool zpq, bool Lzq);
+ double PzqGzpqL(int q, bool zq, bool zpq, bool Lzq);
+ double (FabMap::*PzGL)(int q, bool zq, bool zpq, bool Lzq);
+
+ //data
+ Mat clTree;
+ vector<Mat> trainingImgDescriptors;
+ vector<Mat> testImgDescriptors;
+ vector<IMatch> priorMatches;
+
+ //parameters
+ double PzGe;
+ double PzGNe;
+ double Pnew;
+
+ double mBias;
+ double sFactor;
+
+ int flags;
+ int numSamples;
+
+};
+
+/*
+ The original FAB-MAP algorithm, developed based on:
+ http://ijr.sagepub.com/content/27/6/647.short
+*/
+class CV_EXPORTS FabMap1: public FabMap {
+public:
+ FabMap1(const Mat& clTree, double PzGe, double PzGNe, int flags,
+ int numSamples = 0);
+ virtual ~FabMap1();
+protected:
+
+ //FabMap1 implementation of likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+};
+
+/*
+ A computationally faster version of the original FAB-MAP algorithm. A look-
+ up-table is used to precompute many of the reoccuring calculations
+*/
+class CV_EXPORTS FabMapLUT: public FabMap {
+public:
+ FabMapLUT(const Mat& clTree, double PzGe, double PzGNe,
+ int flags, int numSamples = 0, int precision = 6);
+ virtual ~FabMapLUT();
+protected:
+
+ //FabMap look-up-table implementation of the likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+
+ //precomputed data
+ int (*table)[8];
+
+ //data precision
+ int precision;
+};
+
+/*
+ The Accelerated FAB-MAP algorithm, developed based on:
+ http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942
+*/
+class CV_EXPORTS FabMapFBO: public FabMap {
+public:
+ FabMapFBO(const Mat& clTree, double PzGe, double PzGNe, int flags,
+ int numSamples = 0, double rejectionThreshold = 1e-8, double PsGd =
+ 1e-8, int bisectionStart = 512, int bisectionIts = 9);
+ virtual ~FabMapFBO();
+
+protected:
+
+ //FabMap Fast Bail-out implementation of the likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+
+ //stucture used to determine word comparison order
+ struct WordStats {
+ WordStats() :
+ q(0), info(0), V(0), M(0) {
+ }
+
+ WordStats(int _q, double _info) :
+ q(_q), info(_info), V(0), M(0) {
+ }
+
+ int q;
+ double info;
+ mutable double V;
+ mutable double M;
+
+ bool operator<(const WordStats& w) const {
+ return info < w.info;
+ }
+
+ };
+
+ //private fast bail-out necessary functions
+ void setWordStatistics(const Mat& queryImgDescriptor, multiset<WordStats>& wordData);
+ double limitbisection(double v, double m);
+ double bennettInequality(double v, double m, double delta);
+ static bool compInfo(const WordStats& first, const WordStats& second);
+
+ //parameters
+ double PsGd;
+ double rejectionThreshold;
+ int bisectionStart;
+ int bisectionIts;
+};
+
+/*
+ The FAB-MAP2.0 algorithm, developed based on:
+ http://ijr.sagepub.com/content/30/9/1100.abstract
+*/
+class CV_EXPORTS FabMap2: public FabMap {
+public:
+
+ FabMap2(const Mat& clTree, double PzGe, double PzGNe, int flags);
+ virtual ~FabMap2();
+
+ //FabMap2 builds the inverted index and requires an additional training/test
+ //add function
+ void addTraining(const Mat& queryImgDescriptors) {
+ FabMap::addTraining(queryImgDescriptors);
+ }
+ void addTraining(const vector<Mat>& queryImgDescriptors);
+
+ void add(const Mat& queryImgDescriptors) {
+ FabMap::add(queryImgDescriptors);
+ }
+ void add(const vector<Mat>& queryImgDescriptors);
+
+protected:
+
+ //FabMap2 implementation of the likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+ double getNewPlaceLikelihood(const Mat& queryImgDescriptor);
+
+ //the likelihood function using the inverted index
+ void getIndexLikelihoods(const Mat& queryImgDescriptor, vector<
+ double>& defaults, map<int, vector<int> >& invertedMap,
+ vector<IMatch>& matches);
+ void addToIndex(const Mat& queryImgDescriptor,
+ vector<double>& defaults,
+ map<int, vector<int> >& invertedMap);
+
+ //data
+ vector<double> d1, d2, d3, d4;
+ vector<vector<int> > children;
+
+ // TODO: inverted map a vector?
+
+ vector<double> trainingDefaults;
+ map<int, vector<int> > trainingInvertedMap;
+
+ vector<double> testDefaults;
+ map<int, vector<int> > testInvertedMap;
+
+};
+/*
+ A Chow-Liu tree is required by FAB-MAP. The Chow-Liu tree provides an
+ estimate of the full distribution of visual words using a minimum spanning
+ tree. The tree is generated through training data.
+*/
+class CV_EXPORTS ChowLiuTree {
+public:
+ ChowLiuTree();
+ virtual ~ChowLiuTree();
+
+ //add data to the chow-liu tree before calling make
+ void add(const Mat& imgDescriptor);
+ void add(const vector<Mat>& imgDescriptors);
+
+ const vector<Mat>& getImgDescriptors() const;
+
+ Mat make(double infoThreshold = 0.0);
+
+private:
+ vector<Mat> imgDescriptors;
+ Mat mergedImgDescriptors;
+
+ typedef struct info {
+ float score;
+ short word1;
+ short word2;
+ } info;
+
+ //probabilities extracted from mergedImgDescriptors
+ double P(int a, bool za);
+ double JP(int a, bool za, int b, bool zb); //a & b
+ double CP(int a, bool za, int b, bool zb); // a | b
+
+ //calculating mutual information of all edges
+ void createBaseEdges(list<info>& edges, double infoThreshold);
+ double calcMutInfo(int word1, int word2);
+ static bool sortInfoScores(const info& first, const info& second);
+
+ //selecting minimum spanning egdges with maximum information
+ bool reduceEdgesToMinSpan(list<info>& edges);
+
+ //building the tree sctructure
+ Mat buildTree(int root_word, list<info> &edges);
+ void recAddToTree(Mat &cltree, int q, int pq,
+ list<info> &remaining_edges);
+ vector<int> extractChildren(list<info> &remaining_edges, int q);
+
+};
+
+/*
+ A custom vocabulary training method based on:
+ http://www.springerlink.com/content/d1h6j8x552532003/
+*/
+class CV_EXPORTS BOWMSCTrainer: public BOWTrainer {
+public:
+ BOWMSCTrainer(double clusterSize = 0.4);
+ virtual ~BOWMSCTrainer();
+
+ // Returns trained vocabulary (i.e. cluster centers).
+ virtual Mat cluster() const;
+ virtual Mat cluster(const Mat& descriptors) const;
+
+protected:
+
+ double clusterSize;
+
+};
+
+}
+
+}
+
+#endif /* OPENFABMAP_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp
new file mode 100644
index 00000000..f261bb45
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/contrib/retina.hpp
@@ -0,0 +1,354 @@
+/*#******************************************************************************
+ ** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ **
+ ** By downloading, copying, installing or using the software you agree to this license.
+ ** If you do not agree to this license, do not download, install,
+ ** copy or use the software.
+ **
+ **
+ ** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab.
+ ** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping.
+ **
+ ** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications)
+ **
+ ** Creation - enhancement process 2007-2011
+ ** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France
+ **
+ ** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).
+ ** Refer to the following research paper for more information:
+ ** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
+ ** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book:
+ ** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
+ **
+ ** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author :
+ ** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:
+ ** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
+ ** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
+ ** ====> more informations in the above cited Jeanny Heraults's book.
+ **
+ ** License Agreement
+ ** For Open Source Computer Vision Library
+ **
+ ** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+ ** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
+ **
+ ** For Human Visual System tools (hvstools)
+ ** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.
+ **
+ ** Third party copyrights are property of their respective owners.
+ **
+ ** Redistribution and use in source and binary forms, with or without modification,
+ ** are permitted provided that the following conditions are met:
+ **
+ ** * Redistributions of source code must retain the above copyright notice,
+ ** this list of conditions and the following disclaimer.
+ **
+ ** * Redistributions in binary form must reproduce the above copyright notice,
+ ** this list of conditions and the following disclaimer in the documentation
+ ** and/or other materials provided with the distribution.
+ **
+ ** * The name of the copyright holders may not be used to endorse or promote products
+ ** derived from this software without specific prior written permission.
+ **
+ ** This software is provided by the copyright holders and contributors "as is" and
+ ** any express or implied warranties, including, but not limited to, the implied
+ ** warranties of merchantability and fitness for a particular purpose are disclaimed.
+ ** In no event shall the Intel Corporation or contributors be liable for any direct,
+ ** indirect, incidental, special, exemplary, or consequential damages
+ ** (including, but not limited to, procurement of substitute goods or services;
+ ** loss of use, data, or profits; or business interruption) however caused
+ ** and on any theory of liability, whether in contract, strict liability,
+ ** or tort (including negligence or otherwise) arising in any way out of
+ ** the use of this software, even if advised of the possibility of such damage.
+ *******************************************************************************/
+
+#ifndef __OPENCV_CONTRIB_RETINA_HPP__
+#define __OPENCV_CONTRIB_RETINA_HPP__
+
+/*
+ * Retina.hpp
+ *
+ * Created on: Jul 19, 2011
+ * Author: Alexandre Benoit
+ */
+
+#include "opencv2/core/core.hpp" // for all OpenCV core functionalities access, including cv::Exception support
+#include <valarray>
+
+namespace cv
+{
+
+enum RETINA_COLORSAMPLINGMETHOD
+{
+ RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice
+ RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
+ RETINA_COLOR_BAYER//!< standard bayer sampling
+};
+
+class RetinaFilter;
+
+/**
+ * a wrapper class which allows the Gipsa/Listic Labs model to be used.
+ * This retina model allows spatio-temporal image processing (applied on still images, video sequences).
+ * As a summary, these are the retina model properties:
+ * => It applies a spectral whithening (mid-frequency details enhancement)
+ * => high frequency spatio-temporal noise reduction
+ * => low frequency luminance to be reduced (luminance range compression)
+ * => local logarithmic luminance compression allows details to be enhanced in low light conditions
+ *
+ * USE : this model can be used basically for spatio-temporal video effects but also for :
+ * _using the getParvo method output matrix : texture analysiswith enhanced signal to noise ratio and enhanced details robust against input images luminance ranges
+ * _using the getMagno method output matrix : motion analysis also with the previously cited properties
+ *
+ * for more information, reer to the following papers :
+ * Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
+ * Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
+ *
+ * The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author :
+ * _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:
+ * ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
+ * _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
+ * ====> more informations in the above cited Jeanny Heraults's book.
+ */
+class CV_EXPORTS Retina {
+
+public:
+
+ // parameters structure for better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel
+ struct RetinaParameters{
+ struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
+ OPLandIplParvoParameters():colorMode(true),
+ normaliseOutput(true),
+ photoreceptorsLocalAdaptationSensitivity(0.7f),
+ photoreceptorsTemporalConstant(0.5f),
+ photoreceptorsSpatialConstant(0.53f),
+ horizontalCellsGain(0.0f),
+ hcellsTemporalConstant(1.f),
+ hcellsSpatialConstant(7.f),
+ ganglionCellsSensitivity(0.7f){};// default setup
+ bool colorMode, normaliseOutput;
+ float photoreceptorsLocalAdaptationSensitivity, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, hcellsTemporalConstant, hcellsSpatialConstant, ganglionCellsSensitivity;
+ };
+ struct IplMagnoParameters{ // Inner Plexiform Layer Magnocellular channel (IplMagno)
+ IplMagnoParameters():
+ normaliseOutput(true),
+ parasolCells_beta(0.f),
+ parasolCells_tau(0.f),
+ parasolCells_k(7.f),
+ amacrinCellsTemporalCutFrequency(1.2f),
+ V0CompressionParameter(0.95f),
+ localAdaptintegration_tau(0.f),
+ localAdaptintegration_k(7.f){};// default setup
+ bool normaliseOutput;
+ float parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k;
+ };
+ struct OPLandIplParvoParameters OPLandIplParvo;
+ struct IplMagnoParameters IplMagno;
+ };
+
+ /**
+ * Main constructor with most commun use setup : create an instance of color ready retina model
+ * @param inputSize : the input frame size
+ */
+ Retina(Size inputSize);
+
+ /**
+ * Complete Retina filter constructor which allows all basic structural parameters definition
+ * @param inputSize : the input frame size
+ * @param colorMode : the chosen processing mode : with or without color processing
+ * @param colorSamplingMethod: specifies which kind of color sampling will be used
+ * @param useRetinaLogSampling: activate retina log sampling, if true, the 2 following parameters can be used
+ * @param reductionFactor: only usefull if param useRetinaLogSampling=true, specifies the reduction factor of the output frame (as the center (fovea) is high resolution and corners can be underscaled, then a reduction of the output is allowed without precision leak
+ * @param samplingStrenght: only usefull if param useRetinaLogSampling=true, specifies the strenght of the log scale that is applied
+ */
+ Retina(Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
+
+ virtual ~Retina();
+
+ /**
+ * retreive retina input buffer size
+ */
+ Size inputSize();
+
+ /**
+ * retreive retina output buffer size
+ */
+ Size outputSize();
+
+ /**
+ * try to open an XML retina parameters file to adjust current retina instance setup
+ * => if the xml file does not exist, then default setup is applied
+ * => warning, Exceptions are thrown if read XML file is not valid
+ * @param retinaParameterFile : the parameters filename
+ * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
+ */
+ void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true);
+
+
+ /**
+ * try to open an XML retina parameters file to adjust current retina instance setup
+ * => if the xml file does not exist, then default setup is applied
+ * => warning, Exceptions are thrown if read XML file is not valid
+ * @param fs : the open Filestorage which contains retina parameters
+ * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
+ */
+ void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true);
+
+ /**
+ * try to open an XML retina parameters file to adjust current retina instance setup
+ * => if the xml file does not exist, then default setup is applied
+ * => warning, Exceptions are thrown if read XML file is not valid
+ * @param newParameters : a parameters structures updated with the new target configuration
+ */
+ void setup(RetinaParameters newParameters);
+
+ /**
+ * @return the current parameters setup
+ */
+ Retina::RetinaParameters getParameters();
+
+ /**
+ * parameters setup display method
+ * @return a string which contains formatted parameters information
+ */
+ const std::string printSetup();
+
+ /**
+ * write xml/yml formated parameters information
+ * @param fs : the filename of the xml file that will be open and writen with formatted parameters information
+ */
+ virtual void write( std::string fs ) const;
+
+
+ /**
+ * write xml/yml formated parameters information
+ * @param fs : a cv::Filestorage object ready to be filled
+ */
+ virtual void write( FileStorage& fs ) const;
+
+ /**
+ * setup the OPL and IPL parvo channels (see biologocal model)
+ * OPL is referred as Outer Plexiform Layer of the retina, it allows the spatio-temporal filtering which withens the spectrum and reduces spatio-temporal noise while attenuating global luminance (low frequency energy)
+ * IPL parvo is the OPL next processing stage, it refers to Inner Plexiform layer of the retina, it allows high contours sensitivity in foveal vision.
+ * for more informations, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
+ * @param colorMode : specifies if (true) color is processed of not (false) to then processing gray level image
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false)
+ * @param photoreceptorsLocalAdaptationSensitivity: the photoreceptors sensitivity renage is 0-1 (more log compression effect when value increases)
+ * @param photoreceptorsTemporalConstant: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame
+ * @param photoreceptorsSpatialConstant: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel
+ * @param horizontalCellsGain: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, then, the luminance is not filtered and is still reachable at the output, typicall value is 0
+ * @param HcellsTemporalConstant: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as the photoreceptors
+ * @param HcellsSpatialConstant: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixel, this value is also used for local contrast computing when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model)
+ * @param ganglionCellsSensitivity: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 230
+ */
+ void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7f, const float photoreceptorsTemporalConstant=0.5f, const float photoreceptorsSpatialConstant=0.53f, const float horizontalCellsGain=0, const float HcellsTemporalConstant=1, const float HcellsSpatialConstant=7, const float ganglionCellsSensitivity=0.7f);
+
+ /**
+ * set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
+ * this channel processes signals outpint from OPL processing stage in peripheral vision, it allows motion information enhancement. It is decorrelated from the details channel. See reference paper for more details.
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false)
+ * @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0
+ * @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response)
+ * @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5
+ * @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass fiter of the magnocellular way (motion information channel), unit is frames, tipicall value is 5
+ * @param V0CompressionParameter: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 200
+ * @param localAdaptintegration_tau: specifies the temporal constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation
+ * @param localAdaptintegration_k: specifies the spatial constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation
+ */
+ void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0, const float parasolCells_tau=0, const float parasolCells_k=7, const float amacrinCellsTemporalCutFrequency=1.2f, const float V0CompressionParameter=0.95f, const float localAdaptintegration_tau=0, const float localAdaptintegration_k=7);
+
+ /**
+ * method which allows retina to be applied on an input image, after run, encapsulated retina module is ready to deliver its outputs using dedicated acccessors, see getParvo and getMagno methods
+ * @param inputImage : the input cv::Mat image to be processed, can be gray level or BGR coded in any format (from 8bit to 16bits)
+ */
+ void run(const Mat &inputImage);
+
+ /**
+ * accessor of the details channel of the retina (models foveal vision)
+ * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
+ */
+ void getParvo(Mat &retinaOutput_parvo);
+
+ /**
+ * accessor of the details channel of the retina (models foveal vision)
+ * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
+ */
+ void getParvo(std::valarray<float> &retinaOutput_parvo);
+
+ /**
+ * accessor of the motion channel of the retina (models peripheral vision)
+ * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
+ */
+ void getMagno(Mat &retinaOutput_magno);
+
+ /**
+ * accessor of the motion channel of the retina (models peripheral vision)
+ * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
+ */
+ void getMagno(std::valarray<float> &retinaOutput_magno);
+
+ // original API level data accessors : get buffers addresses...
+ const std::valarray<float> & getMagno() const;
+ const std::valarray<float> & getParvo() const;
+
+ /**
+ * activate color saturation as the final step of the color demultiplexing process
+ * -> this saturation is a sigmoide function applied to each channel of the demultiplexed image.
+ * @param saturateColors: boolean that activates color saturation (if true) or desactivate (if false)
+ * @param colorSaturationValue: the saturation factor
+ */
+ void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0);
+
+ /**
+ * clear all retina buffers (equivalent to opening the eyes after a long period of eye close ;o)
+ */
+ void clearBuffers();
+
+ /**
+ * Activate/desactivate the Magnocellular pathway processing (motion information extraction), by default, it is activated
+ * @param activate: true if Magnocellular output should be activated, false if not
+ */
+ void activateMovingContoursProcessing(const bool activate);
+
+ /**
+ * Activate/desactivate the Parvocellular pathway processing (contours information extraction), by default, it is activated
+ * @param activate: true if Parvocellular (contours information extraction) output should be activated, false if not
+ */
+ void activateContoursProcessing(const bool activate);
+
+protected:
+ // Parameteres setup members
+ RetinaParameters _retinaParameters; // structure of parameters
+
+ // Retina model related modules
+ std::valarray<float> _inputBuffer; //!< buffer used to convert input cv::Mat to internal retina buffers format (valarrays)
+
+ // pointer to retina model
+ RetinaFilter* _retinaFilter; //!< the pointer to the retina module, allocated with instance construction
+
+ /**
+ * exports a valarray buffer outing from HVStools objects to a cv::Mat in CV_8UC1 (gray level picture) or CV_8UC3 (color) format
+ * @param grayMatrixToConvert the valarray to export to OpenCV
+ * @param nbRows : the number of rows of the valarray flatten matrix
+ * @param nbColumns : the number of rows of the valarray flatten matrix
+ * @param colorMode : a flag which mentions if matrix is color (true) or graylevel (false)
+ * @param outBuffer : the output matrix which is reallocated to satisfy Retina output buffer dimensions
+ */
+ void _convertValarrayBuffer2cvMat(const std::valarray<float> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, Mat &outBuffer);
+
+ /**
+ *
+ * @param inputMatToConvert : the OpenCV cv::Mat that has to be converted to gray or RGB valarray buffer that will be processed by the retina model
+ * @param outputValarrayMatrix : the output valarray
+ * @return the input image color mode (color=true, gray levels=false)
+ */
+ bool _convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::valarray<float> &outputValarrayMatrix);
+
+ //! private method called by constructors, gathers their parameters and use them in a unified way
+ void _init(const Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
+
+
+};
+
+}
+#endif /* __OPENCV_CONTRIB_RETINA_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp
new file mode 100644
index 00000000..12773f8c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/core/core.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp
new file mode 100644
index 00000000..1b560c8e
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/affine.hpp
@@ -0,0 +1,513 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_AFFINE3_HPP__
+#define __OPENCV_CORE_AFFINE3_HPP__
+
+#ifdef __cplusplus
+
+#include <opencv2/core/core.hpp>
+
+/*! @file */
+
+namespace cv
+{
+ template<typename T>
+ class Affine3
+ {
+ public:
+ typedef T float_type;
+ typedef Matx<float_type, 3, 3> Mat3;
+ typedef Matx<float_type, 4, 4> Mat4;
+ typedef Vec<float_type, 3> Vec3;
+
+ Affine3();
+
+ //Augmented affine matrix
+ Affine3(const Mat4& affine);
+
+ //Rotation matrix
+ Affine3(const Mat3& R, const Vec3& t = Vec3::all(0));
+
+ //Rodrigues vector
+ Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
+
+ //Combines all constructors above. Supports 4x4, 4x3, 3x3, 1x3, 3x1 sizes of data matrix
+ explicit Affine3(const Mat& data, const Vec3& t = Vec3::all(0));
+
+ //From 16th element array
+ explicit Affine3(const float_type* vals);
+
+ static Affine3 Identity();
+
+ //Rotation matrix
+ void rotation(const Mat3& R);
+
+ //Rodrigues vector
+ void rotation(const Vec3& rvec);
+
+ //Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix;
+ void rotation(const Mat& data);
+
+ void linear(const Mat3& L);
+ void translation(const Vec3& t);
+
+ Mat3 rotation() const;
+ Mat3 linear() const;
+ Vec3 translation() const;
+
+ //Rodrigues vector
+ Vec3 rvec() const;
+
+ Affine3 inv(int method = cv::DECOMP_SVD) const;
+
+ // a.rotate(R) is equivalent to Affine(R, 0) * a;
+ Affine3 rotate(const Mat3& R) const;
+
+ // a.rotate(rvec) is equivalent to Affine(rvec, 0) * a;
+ Affine3 rotate(const Vec3& rvec) const;
+
+ // a.translate(t) is equivalent to Affine(E, t) * a;
+ Affine3 translate(const Vec3& t) const;
+
+ // a.concatenate(affine) is equivalent to affine * a;
+ Affine3 concatenate(const Affine3& affine) const;
+
+ template <typename Y> operator Affine3<Y>() const;
+
+ template <typename Y> Affine3<Y> cast() const;
+
+ Mat4 matrix;
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+ Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine);
+ Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine);
+ operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const;
+ operator Eigen::Transform<T, 3, Eigen::Affine>() const;
+#endif
+ };
+
+ template<typename T> static
+ Affine3<T> operator*(const Affine3<T>& affine1, const Affine3<T>& affine2);
+
+ template<typename T, typename V> static
+ V operator*(const Affine3<T>& affine, const V& vector);
+
+ typedef Affine3<float> Affine3f;
+ typedef Affine3<double> Affine3d;
+
+ static Vec3f operator*(const Affine3f& affine, const Vec3f& vector);
+ static Vec3d operator*(const Affine3d& affine, const Vec3d& vector);
+
+ template<typename _Tp> class DataType< Affine3<_Tp> >
+ {
+ public:
+ typedef Affine3<_Tp> value_type;
+ typedef Affine3<typename DataType<_Tp>::work_type> work_type;
+ typedef _Tp channel_type;
+
+ enum { generic_type = 0,
+ depth = DataType<channel_type>::depth,
+ channels = 16,
+ fmt = DataType<channel_type>::fmt + ((channels - 1) << 8),
+ type = CV_MAKETYPE(depth, channels)
+ };
+
+ typedef Vec<channel_type, channels> vec_type;
+ };
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////
+/// Implementation
+
+template<typename T> inline
+cv::Affine3<T>::Affine3()
+ : matrix(Mat4::eye())
+{}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Mat4& affine)
+ : matrix(affine)
+{}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Mat3& R, const Vec3& t)
+{
+ rotation(R);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Vec3& _rvec, const Vec3& t)
+{
+ rotation(_rvec);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const cv::Mat& data, const Vec3& t)
+{
+ CV_Assert(data.type() == cv::DataType<T>::type);
+
+ if (data.cols == 4 && data.rows == 4)
+ {
+ data.copyTo(matrix);
+ return;
+ }
+ else if (data.cols == 4 && data.rows == 3)
+ {
+ rotation(data(Rect(0, 0, 3, 3)));
+ translation(data(Rect(3, 0, 1, 3)));
+ return;
+ }
+
+ rotation(data);
+ translation(t);
+ matrix.val[12] = matrix.val[13] = matrix.val[14] = 0;
+ matrix.val[15] = 1;
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const float_type* vals) : matrix(vals)
+{}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::Identity()
+{
+ return Affine3<T>(cv::Affine3<T>::Mat4::eye());
+}
+
+template<typename T> inline
+void cv::Affine3<T>::rotation(const Mat3& R)
+{
+ linear(R);
+}
+
+template<typename T> inline
+void cv::Affine3<T>::rotation(const Vec3& _rvec)
+{
+ double rx = _rvec[0], ry = _rvec[1], rz = _rvec[2];
+ double theta = std::sqrt(rx*rx + ry*ry + rz*rz);
+
+ if (theta < DBL_EPSILON)
+ rotation(Mat3::eye());
+ else
+ {
+ const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+
+ double c = std::cos(theta);
+ double s = std::sin(theta);
+ double c1 = 1. - c;
+ double itheta = (theta != 0) ? 1./theta : 0.;
+
+ rx *= itheta; ry *= itheta; rz *= itheta;
+
+ double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };
+ double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };
+ Mat3 R;
+
+ // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
+ // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
+ for(int k = 0; k < 9; ++k)
+ R.val[k] = static_cast<float_type>(c*I[k] + c1*rrt[k] + s*_r_x_[k]);
+
+ rotation(R);
+ }
+}
+
+//Combines rotation methods above. Supports 3x3, 1x3, 3x1 sizes of data matrix;
+template<typename T> inline
+void cv::Affine3<T>::rotation(const cv::Mat& data)
+{
+ CV_Assert(data.type() == cv::DataType<T>::type);
+
+ if (data.cols == 3 && data.rows == 3)
+ {
+ Mat3 R;
+ data.copyTo(R);
+ rotation(R);
+ }
+ else if ((data.cols == 3 && data.rows == 1) || (data.cols == 1 && data.rows == 3))
+ {
+ Vec3 _rvec;
+ data.reshape(1, 3).copyTo(_rvec);
+ rotation(_rvec);
+ }
+ else
+ CV_Assert(!"Input marix can be 3x3, 1x3 or 3x1");
+}
+
+template<typename T> inline
+void cv::Affine3<T>::linear(const Mat3& L)
+{
+ matrix.val[0] = L.val[0]; matrix.val[1] = L.val[1]; matrix.val[ 2] = L.val[2];
+ matrix.val[4] = L.val[3]; matrix.val[5] = L.val[4]; matrix.val[ 6] = L.val[5];
+ matrix.val[8] = L.val[6]; matrix.val[9] = L.val[7]; matrix.val[10] = L.val[8];
+}
+
+template<typename T> inline
+void cv::Affine3<T>::translation(const Vec3& t)
+{
+ matrix.val[3] = t[0]; matrix.val[7] = t[1]; matrix.val[11] = t[2];
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Mat3 cv::Affine3<T>::rotation() const
+{
+ return linear();
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Mat3 cv::Affine3<T>::linear() const
+{
+ typename cv::Affine3<T>::Mat3 R;
+ R.val[0] = matrix.val[0]; R.val[1] = matrix.val[1]; R.val[2] = matrix.val[ 2];
+ R.val[3] = matrix.val[4]; R.val[4] = matrix.val[5]; R.val[5] = matrix.val[ 6];
+ R.val[6] = matrix.val[8]; R.val[7] = matrix.val[9]; R.val[8] = matrix.val[10];
+ return R;
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Vec3 cv::Affine3<T>::translation() const
+{
+ return Vec3(matrix.val[3], matrix.val[7], matrix.val[11]);
+}
+
+template<typename T> inline
+typename cv::Affine3<T>::Vec3 cv::Affine3<T>::rvec() const
+{
+ cv::Vec3d w;
+ cv::Matx33d u, vt, R = rotation();
+ cv::SVD::compute(R, w, u, vt, cv::SVD::FULL_UV + cv::SVD::MODIFY_A);
+ R = u * vt;
+
+ double rx = R.val[7] - R.val[5];
+ double ry = R.val[2] - R.val[6];
+ double rz = R.val[3] - R.val[1];
+
+ double s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
+ double c = (R.val[0] + R.val[4] + R.val[8] - 1) * 0.5;
+ c = c > 1.0 ? 1.0 : c < -1.0 ? -1.0 : c;
+ double theta = acos(c);
+
+ if( s < 1e-5 )
+ {
+ if( c > 0 )
+ rx = ry = rz = 0;
+ else
+ {
+ double t;
+ t = (R.val[0] + 1) * 0.5;
+ rx = std::sqrt(std::max(t, 0.0));
+ t = (R.val[4] + 1) * 0.5;
+ ry = std::sqrt(std::max(t, 0.0)) * (R.val[1] < 0 ? -1.0 : 1.0);
+ t = (R.val[8] + 1) * 0.5;
+ rz = std::sqrt(std::max(t, 0.0)) * (R.val[2] < 0 ? -1.0 : 1.0);
+
+ if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R.val[5] > 0) != (ry*rz > 0) )
+ rz = -rz;
+ theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
+ rx *= theta;
+ ry *= theta;
+ rz *= theta;
+ }
+ }
+ else
+ {
+ double vth = 1/(2*s);
+ vth *= theta;
+ rx *= vth; ry *= vth; rz *= vth;
+ }
+
+ return cv::Vec3d(rx, ry, rz);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::inv(int method) const
+{
+ return matrix.inv(method);
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::rotate(const Mat3& R) const
+{
+ Mat3 Lc = linear();
+ Vec3 tc = translation();
+ Mat4 result;
+ result.val[12] = result.val[13] = result.val[14] = 0;
+ result.val[15] = 1;
+
+ for(int j = 0; j < 3; ++j)
+ {
+ for(int i = 0; i < 3; ++i)
+ {
+ float_type value = 0;
+ for(int k = 0; k < 3; ++k)
+ value += R(j, k) * Lc(k, i);
+ result(j, i) = value;
+ }
+
+ result(j, 3) = R.row(j).dot(tc.t());
+ }
+ return result;
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::rotate(const Vec3& _rvec) const
+{
+ return rotate(Affine3f(_rvec).rotation());
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::translate(const Vec3& t) const
+{
+ Mat4 m = matrix;
+ m.val[ 3] += t[0];
+ m.val[ 7] += t[1];
+ m.val[11] += t[2];
+ return m;
+}
+
+template<typename T> inline
+cv::Affine3<T> cv::Affine3<T>::concatenate(const Affine3<T>& affine) const
+{
+ return (*this).rotate(affine.rotation()).translate(affine.translation());
+}
+
+template<typename T> template <typename Y> inline
+cv::Affine3<T>::operator Affine3<Y>() const
+{
+ return Affine3<Y>(matrix);
+}
+
+template<typename T> template <typename Y> inline
+cv::Affine3<Y> cv::Affine3<T>::cast() const
+{
+ return Affine3<Y>(matrix);
+}
+
+/** @cond IGNORED */
+template<typename T> inline
+cv::Affine3<T> cv::operator*(const cv::Affine3<T>& affine1, const cv::Affine3<T>& affine2)
+{
+ return affine2.concatenate(affine1);
+}
+
+template<typename T, typename V> inline
+V cv::operator*(const cv::Affine3<T>& affine, const V& v)
+{
+ const typename Affine3<T>::Mat4& m = affine.matrix;
+
+ V r;
+ r.x = m.val[0] * v.x + m.val[1] * v.y + m.val[ 2] * v.z + m.val[ 3];
+ r.y = m.val[4] * v.x + m.val[5] * v.y + m.val[ 6] * v.z + m.val[ 7];
+ r.z = m.val[8] * v.x + m.val[9] * v.y + m.val[10] * v.z + m.val[11];
+ return r;
+}
+/** @endcond */
+
+static inline
+cv::Vec3f cv::operator*(const cv::Affine3f& affine, const cv::Vec3f& v)
+{
+ const cv::Matx44f& m = affine.matrix;
+ cv::Vec3f r;
+ r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+ r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+ r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+ return r;
+}
+
+static inline
+cv::Vec3d cv::operator*(const cv::Affine3d& affine, const cv::Vec3d& v)
+{
+ const cv::Matx44d& m = affine.matrix;
+ cv::Vec3d r;
+ r.val[0] = m.val[0] * v[0] + m.val[1] * v[1] + m.val[ 2] * v[2] + m.val[ 3];
+ r.val[1] = m.val[4] * v[0] + m.val[5] * v[1] + m.val[ 6] * v[2] + m.val[ 7];
+ r.val[2] = m.val[8] * v[0] + m.val[9] * v[1] + m.val[10] * v[2] + m.val[11];
+ return r;
+}
+
+
+
+#if defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>& affine)
+{
+ cv::Mat(4, 4, cv::DataType<T>::type, affine.matrix().data()).copyTo(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T>::Affine3(const Eigen::Transform<T, 3, Eigen::Affine>& affine)
+{
+ Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> a = affine;
+ cv::Mat(4, 4, cv::DataType<T>::type, a.matrix().data()).copyTo(matrix);
+}
+
+template<typename T> inline
+cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>() const
+{
+ Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)> r;
+ cv::Mat hdr(4, 4, cv::DataType<T>::type, r.matrix().data());
+ cv::Mat(matrix, false).copyTo(hdr);
+ return r;
+}
+
+template<typename T> inline
+cv::Affine3<T>::operator Eigen::Transform<T, 3, Eigen::Affine>() const
+{
+ return this->operator Eigen::Transform<T, 3, Eigen::Affine, (Eigen::RowMajor)>();
+}
+
+#endif /* defined EIGEN_WORLD_VERSION && defined EIGEN_GEOMETRY_MODULE_H */
+
+
+#endif /* __cplusplus */
+
+#endif /* __OPENCV_CORE_AFFINE3_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp
new file mode 100644
index 00000000..591d50ad
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core.hpp
@@ -0,0 +1,4924 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_HPP__
+#define __OPENCV_CORE_HPP__
+
+#include "opencv2/core/types_c.h"
+#include "opencv2/core/version.hpp"
+
+#ifdef __cplusplus
+
+#ifndef SKIP_INCLUDES
+#include <limits.h>
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <complex>
+#include <map>
+#include <new>
+#include <string>
+#include <vector>
+#include <sstream>
+#endif // SKIP_INCLUDES
+
+/*! \namespace cv
+ Namespace where all the C++ OpenCV functionality resides
+*/
+namespace cv {
+
+#undef abs
+#undef min
+#undef max
+#undef Complex
+
+using std::vector;
+using std::string;
+using std::ptrdiff_t;
+
+template<typename _Tp> class Size_;
+template<typename _Tp> class Point_;
+template<typename _Tp> class Rect_;
+template<typename _Tp, int cn> class Vec;
+template<typename _Tp, int m, int n> class Matx;
+
+typedef std::string String;
+
+class Mat;
+class SparseMat;
+typedef Mat MatND;
+
+namespace ogl {
+ class Buffer;
+ class Texture2D;
+ class Arrays;
+}
+
+// < Deprecated
+class GlBuffer;
+class GlTexture;
+class GlArrays;
+class GlCamera;
+// >
+
+namespace gpu {
+ class GpuMat;
+}
+
+class CV_EXPORTS MatExpr;
+class CV_EXPORTS MatOp_Base;
+class CV_EXPORTS MatArg;
+class CV_EXPORTS MatConstIterator;
+
+template<typename _Tp> class Mat_;
+template<typename _Tp> class MatIterator_;
+template<typename _Tp> class MatConstIterator_;
+template<typename _Tp> class MatCommaInitializer_;
+
+#if !defined(ANDROID) || (defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_WCHAR_T)
+typedef std::basic_string<wchar_t> WString;
+
+CV_EXPORTS string fromUtf16(const WString& str);
+CV_EXPORTS WString toUtf16(const string& str);
+#endif
+
+CV_EXPORTS string format( const char* fmt, ... );
+CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0));
+
+// matrix decomposition types
+enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 };
+enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_L2SQR=5, NORM_HAMMING=6, NORM_HAMMING2=7, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32 };
+enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 };
+enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 };
+enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32,
+ DCT_INVERSE = DFT_INVERSE, DCT_ROWS=DFT_ROWS };
+
+
+/*!
+ The standard OpenCV exception class.
+ Instances of the class are thrown by various functions and methods in the case of critical errors.
+ */
+class CV_EXPORTS Exception : public std::exception
+{
+public:
+ /*!
+ Default constructor
+ */
+ Exception();
+ /*!
+ Full constructor. Normally the constructor is not called explicitly.
+ Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used.
+ */
+ Exception(int _code, const string& _err, const string& _func, const string& _file, int _line);
+ virtual ~Exception() throw();
+
+ /*!
+ \return the error description and the context as a text string.
+ */
+ virtual const char *what() const throw();
+ void formatMessage();
+
+ string msg; ///< the formatted error message
+
+ int code; ///< error code @see CVStatus
+ string err; ///< error description
+ string func; ///< function name. Available only when the compiler supports getting it
+ string file; ///< source file name where the error has occured
+ int line; ///< line number in the source file where the error has occured
+};
+
+
+//! Signals an error and raises the exception.
+
+/*!
+ By default the function prints information about the error to stderr,
+ then it either stops if setBreakOnError() had been called before or raises the exception.
+ It is possible to alternate error processing by using redirectError().
+
+ \param exc the exception raisen.
+ */
+CV_EXPORTS void error( const Exception& exc );
+
+//! Sets/resets the break-on-error mode.
+
+/*!
+ When the break-on-error mode is set, the default error handler
+ issues a hardware exception, which can make debugging more convenient.
+
+ \return the previous state
+ */
+CV_EXPORTS bool setBreakOnError(bool flag);
+
+typedef int (CV_CDECL *ErrorCallback)( int status, const char* func_name,
+ const char* err_msg, const char* file_name,
+ int line, void* userdata );
+
+//! Sets the new error handler and the optional user data.
+
+/*!
+ The function sets the new error handler, called from cv::error().
+
+ \param errCallback the new error handler. If NULL, the default error handler is used.
+ \param userdata the optional user data pointer, passed to the callback.
+ \param prevUserdata the optional output parameter where the previous user data pointer is stored
+
+ \return the previous error handler
+*/
+CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback,
+ void* userdata=0, void** prevUserdata=0);
+
+
+#if defined __GNUC__
+#define CV_Func __func__
+#elif defined _MSC_VER
+#define CV_Func __FUNCTION__
+#else
+#define CV_Func ""
+#endif
+
+#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, CV_Func, __FILE__, __LINE__) )
+#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, CV_Func, __FILE__, __LINE__) )
+#define CV_Assert( expr ) if(!!(expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, CV_Func, __FILE__, __LINE__) )
+
+#ifdef _DEBUG
+#define CV_DbgAssert(expr) CV_Assert(expr)
+#else
+#define CV_DbgAssert(expr)
+#endif
+
+CV_EXPORTS void glob(String pattern, std::vector<String>& result, bool recursive = false);
+
+CV_EXPORTS_W void setNumThreads(int nthreads);
+CV_EXPORTS_W int getNumThreads();
+CV_EXPORTS_W int getThreadNum();
+
+CV_EXPORTS_W const string& getBuildInformation();
+
+//! Returns the number of ticks.
+
+/*!
+ The function returns the number of ticks since the certain event (e.g. when the machine was turned on).
+ It can be used to initialize cv::RNG or to measure a function execution time by reading the tick count
+ before and after the function call. The granularity of ticks depends on the hardware and OS used. Use
+ cv::getTickFrequency() to convert ticks to seconds.
+*/
+CV_EXPORTS_W int64 getTickCount();
+
+/*!
+ Returns the number of ticks per seconds.
+
+ The function returns the number of ticks (as returned by cv::getTickCount()) per second.
+ The following code computes the execution time in milliseconds:
+
+ \code
+ double exec_time = (double)getTickCount();
+ // do something ...
+ exec_time = ((double)getTickCount() - exec_time)*1000./getTickFrequency();
+ \endcode
+*/
+CV_EXPORTS_W double getTickFrequency();
+
+/*!
+ Returns the number of CPU ticks.
+
+ On platforms where the feature is available, the function returns the number of CPU ticks
+ since the certain event (normally, the system power-on moment). Using this function
+ one can accurately measure the execution time of very small code fragments,
+ for which cv::getTickCount() granularity is not enough.
+*/
+CV_EXPORTS_W int64 getCPUTickCount();
+
+/*!
+ Returns SSE etc. support status
+
+ The function returns true if certain hardware features are available.
+ Currently, the following features are recognized:
+ - CV_CPU_MMX - MMX
+ - CV_CPU_SSE - SSE
+ - CV_CPU_SSE2 - SSE 2
+ - CV_CPU_SSE3 - SSE 3
+ - CV_CPU_SSSE3 - SSSE 3
+ - CV_CPU_SSE4_1 - SSE 4.1
+ - CV_CPU_SSE4_2 - SSE 4.2
+ - CV_CPU_POPCNT - POPCOUNT
+ - CV_CPU_AVX - AVX
+ - CV_CPU_AVX2 - AVX2
+
+ \note {Note that the function output is not static. Once you called cv::useOptimized(false),
+ most of the hardware acceleration is disabled and thus the function will return false,
+ until you call cv::useOptimized(true)}
+*/
+CV_EXPORTS_W bool checkHardwareSupport(int feature);
+
+//! returns the number of CPUs (including hyper-threading)
+CV_EXPORTS_W int getNumberOfCPUs();
+
+/*!
+ Allocates memory buffer
+
+ This is specialized OpenCV memory allocation function that returns properly aligned memory buffers.
+ The usage is identical to malloc(). The allocated buffers must be freed with cv::fastFree().
+ If there is not enough memory, the function calls cv::error(), which raises an exception.
+
+ \param bufSize buffer size in bytes
+ \return the allocated memory buffer.
+*/
+CV_EXPORTS void* fastMalloc(size_t bufSize);
+
+/*!
+ Frees the memory allocated with cv::fastMalloc
+
+ This is the corresponding deallocation function for cv::fastMalloc().
+ When ptr==NULL, the function has no effect.
+*/
+CV_EXPORTS void fastFree(void* ptr);
+
+template<typename _Tp> static inline _Tp* allocate(size_t n)
+{
+ return new _Tp[n];
+}
+
+template<typename _Tp> static inline void deallocate(_Tp* ptr, size_t)
+{
+ delete[] ptr;
+}
+
+/*!
+ Aligns pointer by the certain number of bytes
+
+ This small inline function aligns the pointer by the certain number of bytes by shifting
+ it forward by 0 or a positive offset.
+*/
+template<typename _Tp> static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp))
+{
+ return (_Tp*)(((size_t)ptr + n-1) & -n);
+}
+
+/*!
+ Aligns buffer size by the certain number of bytes
+
+ This small inline function aligns a buffer size by the certain number of bytes by enlarging it.
+*/
+static inline size_t alignSize(size_t sz, int n)
+{
+ assert((n & (n - 1)) == 0); // n is a power of 2
+ return (sz + n-1) & -n;
+}
+
+/*!
+ Turns on/off available optimization
+
+ The function turns on or off the optimized code in OpenCV. Some optimization can not be enabled
+ or disabled, but, for example, most of SSE code in OpenCV can be temporarily turned on or off this way.
+
+ \note{Since optimization may imply using special data structures, it may be unsafe
+ to call this function anywhere in the code. Instead, call it somewhere at the top level.}
+*/
+CV_EXPORTS_W void setUseOptimized(bool onoff);
+
+/*!
+ Returns the current optimization status
+
+ The function returns the current optimization status, which is controlled by cv::setUseOptimized().
+*/
+CV_EXPORTS_W bool useOptimized();
+
+/*!
+ The STL-compliant memory Allocator based on cv::fastMalloc() and cv::fastFree()
+*/
+template<typename _Tp> class Allocator
+{
+public:
+ typedef _Tp value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ template<typename U> class rebind { typedef Allocator<U> other; };
+
+ explicit Allocator() {}
+ ~Allocator() {}
+ explicit Allocator(Allocator const&) {}
+ template<typename U>
+ explicit Allocator(Allocator<U> const&) {}
+
+ // address
+ pointer address(reference r) { return &r; }
+ const_pointer address(const_reference r) { return &r; }
+
+ pointer allocate(size_type count, const void* =0)
+ { return reinterpret_cast<pointer>(fastMalloc(count * sizeof (_Tp))); }
+
+ void deallocate(pointer p, size_type) {fastFree(p); }
+
+ size_type max_size() const
+ { return max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); }
+
+ void construct(pointer p, const _Tp& v) { new(static_cast<void*>(p)) _Tp(v); }
+ void destroy(pointer p) { p->~_Tp(); }
+};
+
+/////////////////////// Vec (used as element of multi-channel images /////////////////////
+
+/*!
+ A helper class for cv::DataType
+
+ The class is specialized for each fundamental numerical data type supported by OpenCV.
+ It provides DataDepth<T>::value constant.
+*/
+template<typename _Tp> class DataDepth {};
+
+template<> class DataDepth<bool> { public: enum { value = CV_8U, fmt=(int)'u' }; };
+template<> class DataDepth<uchar> { public: enum { value = CV_8U, fmt=(int)'u' }; };
+template<> class DataDepth<schar> { public: enum { value = CV_8S, fmt=(int)'c' }; };
+template<> class DataDepth<char> { public: enum { value = CV_8S, fmt=(int)'c' }; };
+template<> class DataDepth<ushort> { public: enum { value = CV_16U, fmt=(int)'w' }; };
+template<> class DataDepth<short> { public: enum { value = CV_16S, fmt=(int)'s' }; };
+template<> class DataDepth<int> { public: enum { value = CV_32S, fmt=(int)'i' }; };
+// this is temporary solution to support 32-bit unsigned integers
+template<> class DataDepth<unsigned> { public: enum { value = CV_32S, fmt=(int)'i' }; };
+template<> class DataDepth<float> { public: enum { value = CV_32F, fmt=(int)'f' }; };
+template<> class DataDepth<double> { public: enum { value = CV_64F, fmt=(int)'d' }; };
+template<typename _Tp> class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; };
+
+
+////////////////////////////// Small Matrix ///////////////////////////
+
+/*!
+  A short numerical matrix.
+
+  This template class represents small fixed-size matrices (2x2, 3x3, 4x4 ...)
+  on which you can perform basic arithmetical operations, access individual elements etc.
+  The matrices are allocated on stack, as opposed to std::valarray, std::vector, cv::Mat etc.,
+  whose elements are dynamically allocated in the heap.
+
+  The template takes 3 parameters:
+  -# _Tp element type
+  -# m the number of rows
+  -# n the number of columns
+
+  In addition to the universal notation like Matx<float, 2, 2>, you can use shorter aliases
+  for the most popular specialized variants of Matx, e.g. Matx22f ~ Matx<float, 2, 2>.
+ */
+
+// Empty tag types used to select the proper internal Matx/Vec constructor
+// (see the tag-taking constructors declared inside Matx and Vec below).
+struct CV_EXPORTS Matx_AddOp {};
+struct CV_EXPORTS Matx_SubOp {};
+struct CV_EXPORTS Matx_ScaleOp {};
+struct CV_EXPORTS Matx_MulOp {};
+struct CV_EXPORTS Matx_MatMulOp {};
+struct CV_EXPORTS Matx_TOp {};
+
+template<typename _Tp, int m, int n> class Matx
+{
+public:
+ typedef _Tp value_type;
+ typedef Matx<_Tp, (m < n ? m : n), 1> diag_type;
+ typedef Matx<_Tp, m, n> mat_type;
+ enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols,
+ type = CV_MAKETYPE(depth, channels) };
+
+ //! default constructor
+ Matx();
+
+ Matx(_Tp v0); //!< 1x1 matrix
+ Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix
+ Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8, _Tp v9, _Tp v10, _Tp v11,
+ _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix
+ explicit Matx(const _Tp* vals); //!< initialize from a plain array
+
+ static Matx all(_Tp alpha);
+ static Matx zeros();
+ static Matx ones();
+ static Matx eye();
+ static Matx diag(const diag_type& d);
+ static Matx randu(_Tp a, _Tp b);
+ static Matx randn(_Tp a, _Tp b);
+
+ //! dot product computed with the default precision
+ _Tp dot(const Matx<_Tp, m, n>& v) const;
+
+ //! dot product computed in double-precision arithmetics
+ double ddot(const Matx<_Tp, m, n>& v) const;
+
+ //! conversion to another data type
+ template<typename T2> operator Matx<T2, m, n>() const;
+
+ //! change the matrix shape
+ template<int m1, int n1> Matx<_Tp, m1, n1> reshape() const;
+
+ //! extract part of the matrix
+ template<int m1, int n1> Matx<_Tp, m1, n1> get_minor(int i, int j) const;
+
+ //! extract the matrix row
+ Matx<_Tp, 1, n> row(int i) const;
+
+ //! extract the matrix column
+ Matx<_Tp, m, 1> col(int i) const;
+
+ //! extract the matrix diagonal
+ diag_type diag() const;
+
+ //! transpose the matrix
+ Matx<_Tp, n, m> t() const;
+
+ //! invert the matrix
+ Matx<_Tp, n, m> inv(int method=DECOMP_LU) const;
+
+ //! solve linear system
+ template<int l> Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const;
+ Vec<_Tp, n> solve(const Vec<_Tp, m>& rhs, int method) const;
+
+ //! multiply two matrices element-wise
+ Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const;
+
+ //! element access
+ const _Tp& operator ()(int i, int j) const;
+ _Tp& operator ()(int i, int j);
+
+ //! 1D element access
+ const _Tp& operator ()(int i) const;
+ _Tp& operator ()(int i);
+
+ // internal tag-dispatched constructors used to implement the arithmetic operators
+ Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp);
+ Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp);
+ template<typename _T2> Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp);
+ Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp);
+ template<int l> Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp);
+ Matx(const Matx<_Tp, n, m>& a, Matx_TOp);
+
+ _Tp val[m*n]; //!< matrix elements, stored row by row (channels = m*n)
+};
+
+
+// Shorter aliases for the most popular small matrices:
+// MatxMNf ~ Matx<float, M, N>, MatxMNd ~ Matx<double, M, N>.
+typedef Matx<float, 1, 2> Matx12f;
+typedef Matx<double, 1, 2> Matx12d;
+typedef Matx<float, 1, 3> Matx13f;
+typedef Matx<double, 1, 3> Matx13d;
+typedef Matx<float, 1, 4> Matx14f;
+typedef Matx<double, 1, 4> Matx14d;
+typedef Matx<float, 1, 6> Matx16f;
+typedef Matx<double, 1, 6> Matx16d;
+
+typedef Matx<float, 2, 1> Matx21f;
+typedef Matx<double, 2, 1> Matx21d;
+typedef Matx<float, 3, 1> Matx31f;
+typedef Matx<double, 3, 1> Matx31d;
+typedef Matx<float, 4, 1> Matx41f;
+typedef Matx<double, 4, 1> Matx41d;
+typedef Matx<float, 6, 1> Matx61f;
+typedef Matx<double, 6, 1> Matx61d;
+
+typedef Matx<float, 2, 2> Matx22f;
+typedef Matx<double, 2, 2> Matx22d;
+typedef Matx<float, 2, 3> Matx23f;
+typedef Matx<double, 2, 3> Matx23d;
+typedef Matx<float, 3, 2> Matx32f;
+typedef Matx<double, 3, 2> Matx32d;
+
+typedef Matx<float, 3, 3> Matx33f;
+typedef Matx<double, 3, 3> Matx33d;
+
+typedef Matx<float, 3, 4> Matx34f;
+typedef Matx<double, 3, 4> Matx34d;
+typedef Matx<float, 4, 3> Matx43f;
+typedef Matx<double, 4, 3> Matx43d;
+
+typedef Matx<float, 4, 4> Matx44f;
+typedef Matx<double, 4, 4> Matx44d;
+typedef Matx<float, 6, 6> Matx66f;
+typedef Matx<double, 6, 6> Matx66d;
+
+
+/*!
+ A short numerical vector.
+
+ This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements)
+ on which you can perform basic arithmetical operations, access individual elements using [] operator etc.
+ The vectors are allocated on stack, as opposed to std::valarray, std::vector, cv::Mat etc.,
+ whose elements are dynamically allocated in the heap.
+
+ The template takes 2 parameters:
+ -# _Tp element type
+ -# cn the number of elements
+
+ In addition to the universal notation like Vec<float, 3>, you can use shorter aliases
+ for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec<float, 3>.
+*/
+template<typename _Tp, int cn> class Vec : public Matx<_Tp, cn, 1>
+{
+public:
+ typedef _Tp value_type;
+ enum { depth = DataDepth<_Tp>::value, channels = cn, type = CV_MAKETYPE(depth, channels) };
+
+ //! default constructor
+ Vec();
+
+ Vec(_Tp v0); //!< 1-element vector constructor
+ Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor
+ Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor
+ explicit Vec(const _Tp* values); //!< initialize from a plain array
+
+ Vec(const Vec<_Tp, cn>& v);
+
+ //! returns a vector with all elements set to alpha
+ static Vec all(_Tp alpha);
+
+ //! per-element multiplication
+ Vec mul(const Vec<_Tp, cn>& v) const;
+
+ //! conjugation (makes sense for complex numbers and quaternions)
+ Vec conj() const;
+
+ /*!
+ cross product of the two 3D vectors.
+
+ For other dimensionalities an exception is raised
+ */
+ Vec cross(const Vec& v) const;
+ //! conversion to another data type
+ template<typename T2> operator Vec<T2, cn>() const;
+ //! conversion to 4-element CvScalar.
+ operator CvScalar() const;
+
+ /*! element access */
+ const _Tp& operator [](int i) const;
+ _Tp& operator[](int i);
+ const _Tp& operator ()(int i) const;
+ _Tp& operator ()(int i);
+
+ // internal tag-dispatched constructors used to implement the arithmetic operators
+ Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp);
+ Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp);
+ template<typename _T2> Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp);
+};
+
+
+/* \typedef
+
+ Shorter aliases for the most popular specializations of Vec<T,n>
+*/
+// Element-type suffix legend: b=uchar, s=short, w=ushort, i=int, f=float, d=double.
+typedef Vec<uchar, 2> Vec2b;
+typedef Vec<uchar, 3> Vec3b;
+typedef Vec<uchar, 4> Vec4b;
+
+typedef Vec<short, 2> Vec2s;
+typedef Vec<short, 3> Vec3s;
+typedef Vec<short, 4> Vec4s;
+
+typedef Vec<ushort, 2> Vec2w;
+typedef Vec<ushort, 3> Vec3w;
+typedef Vec<ushort, 4> Vec4w;
+
+typedef Vec<int, 2> Vec2i;
+typedef Vec<int, 3> Vec3i;
+typedef Vec<int, 4> Vec4i;
+typedef Vec<int, 6> Vec6i;
+typedef Vec<int, 8> Vec8i;
+
+typedef Vec<float, 2> Vec2f;
+typedef Vec<float, 3> Vec3f;
+typedef Vec<float, 4> Vec4f;
+typedef Vec<float, 6> Vec6f;
+
+typedef Vec<double, 2> Vec2d;
+typedef Vec<double, 3> Vec3d;
+typedef Vec<double, 4> Vec4d;
+typedef Vec<double, 6> Vec6d;
+
+
+//////////////////////////////// Complex //////////////////////////////
+
+/*!
+ A complex number class.
+
+ The template class is similar and compatible with std::complex, however it provides slightly
+ more convenient access to the real and imaginary parts via simple field access, as opposed
+ to std::complex::real() and std::complex::imag().
+*/
+template<typename _Tp> class Complex
+{
+public:
+
+ //! constructors
+ Complex();
+ Complex( _Tp _re, _Tp _im=0 );
+ Complex( const std::complex<_Tp>& c );
+
+ //! conversion to another data type
+ template<typename T2> operator Complex<T2>() const;
+ //! conjugation
+ Complex conj() const;
+ //! conversion to std::complex
+ operator std::complex<_Tp>() const;
+
+ _Tp re, im; //!< the real and the imaginary parts
+};
+
+
+// single- and double-precision complex-number aliases
+typedef Complex<float> Complexf;
+typedef Complex<double> Complexd;
+
+
+//////////////////////////////// Point_ ////////////////////////////////
+
+/*!
+ template 2D point class.
+
+ The class defines a point in 2D space. Data type of the point coordinates is specified
+ as a template parameter. There are a few shorter aliases available for user convenience.
+ See cv::Point, cv::Point2i, cv::Point2f and cv::Point2d.
+*/
+template<typename _Tp> class Point_
+{
+public:
+ typedef _Tp value_type;
+
+ // various constructors
+ Point_();
+ Point_(_Tp _x, _Tp _y);
+ Point_(const Point_& pt);
+ Point_(const CvPoint& pt);
+ Point_(const CvPoint2D32f& pt);
+ Point_(const Size_<_Tp>& sz);
+ Point_(const Vec<_Tp, 2>& v);
+
+ Point_& operator = (const Point_& pt);
+ //! conversion to another data type
+ template<typename _Tp2> operator Point_<_Tp2>() const;
+
+ //! conversion to the old-style C structures
+ operator CvPoint() const;
+ operator CvPoint2D32f() const;
+ operator Vec<_Tp, 2>() const;
+
+ //! dot product
+ _Tp dot(const Point_& pt) const;
+ //! dot product computed in double-precision arithmetics
+ double ddot(const Point_& pt) const;
+ //! cross-product
+ double cross(const Point_& pt) const;
+ //! checks whether the point is inside the specified rectangle
+ bool inside(const Rect_<_Tp>& r) const;
+
+ _Tp x, y; //!< the point coordinates
+};
+
+/*!
+ template 3D point class.
+
+ The class defines a point in 3D space. Data type of the point coordinates is specified
+ as a template parameter.
+
+ \see cv::Point3i, cv::Point3f and cv::Point3d
+*/
+template<typename _Tp> class Point3_
+{
+public:
+ typedef _Tp value_type;
+
+ // various constructors
+ Point3_();
+ Point3_(_Tp _x, _Tp _y, _Tp _z);
+ Point3_(const Point3_& pt);
+ explicit Point3_(const Point_<_Tp>& pt);
+ Point3_(const CvPoint3D32f& pt);
+ Point3_(const Vec<_Tp, 3>& v);
+
+ Point3_& operator = (const Point3_& pt);
+ //! conversion to another data type
+ template<typename _Tp2> operator Point3_<_Tp2>() const;
+ //! conversion to the old-style CvPoint...
+ operator CvPoint3D32f() const;
+ //! conversion to cv::Vec<>
+ operator Vec<_Tp, 3>() const;
+
+ //! dot product
+ _Tp dot(const Point3_& pt) const;
+ //! dot product computed in double-precision arithmetics
+ double ddot(const Point3_& pt) const;
+ //! cross product of the 2 3D points
+ Point3_ cross(const Point3_& pt) const;
+
+ _Tp x, y, z; //!< the point coordinates
+};
+
+//////////////////////////////// Size_ ////////////////////////////////
+
+/*!
+ The 2D size class
+
+ The class represents the size of a 2D rectangle, image size, matrix size etc.
+ Normally, cv::Size ~ cv::Size_<int> is used.
+*/
+template<typename _Tp> class Size_
+{
+public:
+ typedef _Tp value_type;
+
+ //! various constructors
+ Size_();
+ Size_(_Tp _width, _Tp _height);
+ Size_(const Size_& sz);
+ Size_(const CvSize& sz);
+ Size_(const CvSize2D32f& sz);
+ Size_(const Point_<_Tp>& pt);
+
+ Size_& operator = (const Size_& sz);
+ //! the area (width*height)
+ _Tp area() const;
+
+ //! conversion of another data type.
+ template<typename _Tp2> operator Size_<_Tp2>() const;
+
+ //! conversion to the old-style OpenCV types
+ operator CvSize() const;
+ operator CvSize2D32f() const;
+
+ _Tp width, height; //!< the width and the height
+};
+
+//////////////////////////////// Rect_ ////////////////////////////////
+
+/*!
+ The 2D up-right rectangle class
+
+ The class represents a 2D rectangle with coordinates of the specified data type.
+ Normally, cv::Rect ~ cv::Rect_<int> is used.
+*/
+template<typename _Tp> class Rect_
+{
+public:
+ typedef _Tp value_type;
+
+ //! various constructors
+ Rect_();
+ Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
+ Rect_(const Rect_& r);
+ Rect_(const CvRect& r);
+ Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz);
+ Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2);
+
+ Rect_& operator = ( const Rect_& r );
+ //! the top-left corner
+ Point_<_Tp> tl() const;
+ //! the bottom-right corner
+ Point_<_Tp> br() const;
+
+ //! size (width, height) of the rectangle
+ Size_<_Tp> size() const;
+ //! area (width*height) of the rectangle
+ _Tp area() const;
+
+ //! conversion to another data type
+ template<typename _Tp2> operator Rect_<_Tp2>() const;
+ //! conversion to the old-style CvRect
+ operator CvRect() const;
+
+ //! checks whether the rectangle contains the point
+ bool contains(const Point_<_Tp>& pt) const;
+
+ _Tp x, y, width, height; //!< the top-left corner, as well as width and height of the rectangle
+};
+
+
+// Default (int) and floating-point variants of the point/size/rect templates.
+typedef Point_<int> Point2i;
+typedef Point2i Point;
+typedef Size_<int> Size2i;
+typedef Size_<double> Size2d;
+typedef Size2i Size;
+typedef Rect_<int> Rect;
+typedef Point_<float> Point2f;
+typedef Point_<double> Point2d;
+typedef Size_<float> Size2f;
+typedef Point3_<int> Point3i;
+typedef Point3_<float> Point3f;
+typedef Point3_<double> Point3d;
+
+
+/*!
+ The rotated 2D rectangle.
+
+ The class represents rotated (i.e. not up-right) rectangles on a plane.
+ Each rectangle is described by the center point (mass center), length of each side
+ (represented by cv::Size2f structure) and the rotation angle in degrees.
+*/
+class CV_EXPORTS RotatedRect
+{
+public:
+ //! various constructors
+ RotatedRect();
+ RotatedRect(const Point2f& center, const Size2f& size, float angle);
+ RotatedRect(const CvBox2D& box);
+
+ //! returns 4 vertices of the rectangle
+ void points(Point2f pts[]) const;
+ //! returns the minimal up-right rectangle containing the rotated rectangle
+ Rect boundingRect() const;
+ //! conversion to the old-style CvBox2D structure
+ operator CvBox2D() const;
+
+ Point2f center; //!< the rectangle mass center
+ Size2f size; //!< width and height of the rectangle
+ float angle; //!< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
+};
+
+//////////////////////////////// Scalar_ ///////////////////////////////
+
+/*!
+ The template scalar class.
+
+ This is partially specialized cv::Vec class with the number of elements = 4, i.e. a short vector of four elements.
+ Normally, cv::Scalar ~ cv::Scalar_<double> is used.
+*/
+template<typename _Tp> class Scalar_ : public Vec<_Tp, 4>
+{
+public:
+ //! various constructors
+ Scalar_();
+ Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0);
+ Scalar_(const CvScalar& s);
+ Scalar_(_Tp v0);
+
+ //! returns a scalar with all elements set to v0
+ static Scalar_<_Tp> all(_Tp v0);
+ //! conversion to the old-style CvScalar
+ operator CvScalar() const;
+
+ //! conversion to another data type
+ template<typename T2> operator Scalar_<T2>() const;
+
+ //! per-element product
+ Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const;
+
+ //! returns (v0, -v1, -v2, -v3)
+ Scalar_<_Tp> conj() const;
+
+ //! returns true iff v1 == v2 == v3 == 0
+ bool isReal() const;
+};
+
+typedef Scalar_<double> Scalar;
+
+// NOTE(review): presumably writes the elements of s into buf converted to the
+// given CV type; confirm the unroll_to semantics against the implementation.
+CV_EXPORTS void scalarToRawData(const Scalar& s, void* buf, int type, int unroll_to=0);
+
+//////////////////////////////// Range /////////////////////////////////
+
+/*!
+ The 1D range class
+
+ This is the class used to specify a continuous subsequence, i.e. part of a contour, or a column span in a matrix.
+*/
+class CV_EXPORTS Range
+{
+public:
+ Range();
+ Range(int _start, int _end);
+ Range(const CvSlice& slice);
+ int size() const;
+ bool empty() const;
+ //! returns the special 'whole sequence' range
+ static Range all();
+ //! conversion to the old-style CvSlice
+ operator CvSlice() const;
+
+ int start, end; //!< the range boundaries
+};
+
+/////////////////////////////// DataType ////////////////////////////////
+
+/*!
+ Informative template class for OpenCV "scalars".
+
+ The class is specialized for each primitive numerical type supported by OpenCV (such as unsigned char or float),
+ as well as for more complex types, like cv::Complex<>, std::complex<>, cv::Vec<> etc.
+ The common property of all such types (called "scalars", do not confuse it with cv::Scalar_)
+ is that each of them is basically a tuple of numbers of the same type. Each "scalar" can be represented
+ by the depth id (CV_8U ... CV_64F) and the number of channels.
+ OpenCV matrices, 2D or nD, dense or sparse, can store "scalars",
+ as long as the number of channels does not exceed CV_CN_MAX.
+*/
+template<typename _Tp> class DataType
+{
+public:
+ typedef _Tp value_type;
+ typedef value_type work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ // generic (unspecialized) case: generic_type = 1 and depth = -1 mark the
+ // type as unknown to OpenCV; the specializations below override these.
+ enum { generic_type = 1, depth = -1, channels = 1, fmt=0,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+// DataType specializations for the primitive numerical types. For each, depth
+// and fmt come from the corresponding DataDepth specialization and channels is 1.
+template<> class DataType<bool>
+{
+public:
+ typedef bool value_type;
+ typedef int work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<> class DataType<uchar>
+{
+public:
+ typedef uchar value_type;
+ typedef int work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<> class DataType<schar>
+{
+public:
+ typedef schar value_type;
+ typedef int work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+// NOTE: plain 'char' is deliberately mapped to schar here (value_type is schar).
+template<> class DataType<char>
+{
+public:
+ typedef schar value_type;
+ typedef int work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<> class DataType<ushort>
+{
+public:
+ typedef ushort value_type;
+ typedef int work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<> class DataType<short>
+{
+public:
+ typedef short value_type;
+ typedef int work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<> class DataType<int>
+{
+public:
+ typedef int value_type;
+ typedef value_type work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<> class DataType<float>
+{
+public:
+ typedef float value_type;
+ typedef value_type work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<> class DataType<double>
+{
+public:
+ typedef double value_type;
+ typedef value_type work_type;
+ typedef value_type channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 1,
+ fmt=DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+// DataType specializations for compound "scalar" types. In every case 'fmt'
+// packs (channels-1) into the high byte and the per-channel format character
+// (from DataDepth) into the low byte.
+template<typename _Tp, int m, int n> class DataType<Matx<_Tp, m, n> >
+{
+public:
+ typedef Matx<_Tp, m, n> value_type;
+ typedef Matx<typename DataType<_Tp>::work_type, m, n> work_type;
+ typedef _Tp channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = m*n,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<typename _Tp, int cn> class DataType<Vec<_Tp, cn> >
+{
+public:
+ typedef Vec<_Tp, cn> value_type;
+ typedef Vec<typename DataType<_Tp>::work_type, cn> work_type;
+ typedef _Tp channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = cn,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
+template<typename _Tp> class DataType<std::complex<_Tp> >
+{
+public:
+ typedef std::complex<_Tp> value_type;
+ typedef value_type work_type;
+ typedef _Tp channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 2,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+template<typename _Tp> class DataType<Complex<_Tp> >
+{
+public:
+ typedef Complex<_Tp> value_type;
+ typedef value_type work_type;
+ typedef _Tp channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 2,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+template<typename _Tp> class DataType<Point_<_Tp> >
+{
+public:
+ typedef Point_<_Tp> value_type;
+ typedef Point_<typename DataType<_Tp>::work_type> work_type;
+ typedef _Tp channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 2,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+template<typename _Tp> class DataType<Point3_<_Tp> >
+{
+public:
+ typedef Point3_<_Tp> value_type;
+ typedef Point3_<typename DataType<_Tp>::work_type> work_type;
+ typedef _Tp channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 3,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+template<typename _Tp> class DataType<Size_<_Tp> >
+{
+public:
+ typedef Size_<_Tp> value_type;
+ typedef Size_<typename DataType<_Tp>::work_type> work_type;
+ typedef _Tp channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 2,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+template<typename _Tp> class DataType<Rect_<_Tp> >
+{
+public:
+ typedef Rect_<_Tp> value_type;
+ typedef Rect_<typename DataType<_Tp>::work_type> work_type;
+ typedef _Tp channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 4,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+template<typename _Tp> class DataType<Scalar_<_Tp> >
+{
+public:
+ typedef Scalar_<_Tp> value_type;
+ typedef Scalar_<typename DataType<_Tp>::work_type> work_type;
+ typedef _Tp channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 4,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+template<> class DataType<Range>
+{
+public:
+ typedef Range value_type;
+ typedef value_type work_type;
+ typedef int channel_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = 2,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+ typedef Vec<channel_type, channels> vec_type;
+};
+
+//////////////////// generic_type ref-counting pointer class for C/C++ objects ////////////////////////
+
+/*!
+ Smart pointer to dynamically allocated objects.
+
+ This is a template pointer-wrapping class that stores the associated reference counter along with the
+ object pointer. The class is similar to std::shared_ptr<> from the recent addons to the C++ standard,
+ but is shorter to write :) and self-contained (i.e. does not add any dependency on the compiler or an external library).
+
+ Basically, you can use "Ptr<MyObjectType> ptr" (or faster "const Ptr<MyObjectType>& ptr" for read-only access)
+ everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class.
+ To make it all work, you need to specialize Ptr<>::delete_obj(), like:
+
+ \code
+ template<> void Ptr<MyObjectType>::delete_obj() { call_destructor_func(obj); }
+ \endcode
+
+ \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(),
+ since the default implementation calls "delete obj;"}
+
+ \note{Another good property of the class is that the operations on the reference counter are atomic,
+ i.e. it is safe to use the class in multi-threaded applications}
+*/
+template<typename _Tp> class Ptr
+{
+public:
+ //! empty constructor
+ Ptr();
+ //! take ownership of the pointer. The associated reference counter is allocated and set to 1
+ Ptr(_Tp* _obj);
+ //! calls release()
+ ~Ptr();
+ //! copy constructor. Copies the members and calls addref()
+ Ptr(const Ptr& ptr);
+ template<typename _Tp2> Ptr(const Ptr<_Tp2>& ptr);
+ //! copy operator. Calls ptr.addref() and release() before copying the members
+ Ptr& operator = (const Ptr& ptr);
+ //! increments the reference counter
+ void addref();
+ //! decrements the reference counter. If it reaches 0, delete_obj() is called
+ void release();
+ //! deletes the object. Override if needed
+ void delete_obj();
+ //! returns true iff obj==NULL
+ bool empty() const;
+
+ //! cast pointer to another type
+ template<typename _Tp2> Ptr<_Tp2> ptr();
+ template<typename _Tp2> const Ptr<_Tp2> ptr() const;
+
+ //! helper operators making "Ptr<T> ptr" use very similar to "T* ptr".
+ _Tp* operator -> ();
+ const _Tp* operator -> () const;
+
+ operator _Tp* ();
+ operator const _Tp*() const;
+
+ _Tp* obj; //!< the object pointer.
+ int* refcount; //!< the associated reference counter
+};
+
+// Factory helpers: construct a T from the given arguments and wrap it in a
+// Ptr<T>. The numbered overloads emulate variadic arguments (pre-C++11).
+template<typename T>
+Ptr<T> makePtr();
+
+template<typename T, typename A1>
+Ptr<T> makePtr(const A1& a1);
+
+template<typename T, typename A1, typename A2>
+Ptr<T> makePtr(const A1& a1, const A2& a2);
+
+template<typename T, typename A1, typename A2, typename A3>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3);
+
+template<typename T, typename A1, typename A2, typename A3, typename A4>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4);
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5);
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6);
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7);
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8);
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9);
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9, typename A10>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10);
+
+//////////////////////// Input/Output Array Arguments /////////////////////////////////
+
+/*!
+ Proxy datatype for passing Mat's and vector<>'s as input parameters
+ */
+class CV_EXPORTS _InputArray
+{
+public:
+ // Array-kind constants, shifted into the upper bits so that the lower bits
+ // can carry the element type; FIXED_TYPE/FIXED_SIZE are extra flag bits.
+ enum {
+ KIND_SHIFT = 16,
+ FIXED_TYPE = 0x8000 << KIND_SHIFT,
+ FIXED_SIZE = 0x4000 << KIND_SHIFT,
+ KIND_MASK = ~(FIXED_TYPE|FIXED_SIZE) - (1 << KIND_SHIFT) + 1,
+
+ NONE = 0 << KIND_SHIFT,
+ MAT = 1 << KIND_SHIFT,
+ MATX = 2 << KIND_SHIFT,
+ STD_VECTOR = 3 << KIND_SHIFT,
+ STD_VECTOR_VECTOR = 4 << KIND_SHIFT,
+ STD_VECTOR_MAT = 5 << KIND_SHIFT,
+ EXPR = 6 << KIND_SHIFT,
+ OPENGL_BUFFER = 7 << KIND_SHIFT,
+ OPENGL_TEXTURE = 8 << KIND_SHIFT,
+ GPU_MAT = 9 << KIND_SHIFT,
+ OCL_MAT =10 << KIND_SHIFT
+ };
+ _InputArray();
+
+ // implicit constructors: one per container type that can be passed as input
+ _InputArray(const Mat& m);
+ _InputArray(const MatExpr& expr);
+ template<typename _Tp> _InputArray(const _Tp* vec, int n);
+ template<typename _Tp> _InputArray(const vector<_Tp>& vec);
+ template<typename _Tp> _InputArray(const vector<vector<_Tp> >& vec);
+ _InputArray(const vector<Mat>& vec);
+ template<typename _Tp> _InputArray(const vector<Mat_<_Tp> >& vec);
+ template<typename _Tp> _InputArray(const Mat_<_Tp>& m);
+ template<typename _Tp, int m, int n> _InputArray(const Matx<_Tp, m, n>& matx);
+ _InputArray(const Scalar& s);
+ _InputArray(const double& val);
+ // < Deprecated
+ _InputArray(const GlBuffer& buf);
+ _InputArray(const GlTexture& tex);
+ // >
+ _InputArray(const gpu::GpuMat& d_mat);
+ _InputArray(const ogl::Buffer& buf);
+ _InputArray(const ogl::Texture2D& tex);
+
+ virtual Mat getMat(int i=-1) const;
+ virtual void getMatVector(vector<Mat>& mv) const;
+ // < Deprecated
+ virtual GlBuffer getGlBuffer() const;
+ virtual GlTexture getGlTexture() const;
+ // >
+ virtual gpu::GpuMat getGpuMat() const;
+ /*virtual*/ ogl::Buffer getOGlBuffer() const;
+ /*virtual*/ ogl::Texture2D getOGlTexture2D() const;
+
+ virtual int kind() const;
+ virtual Size size(int i=-1) const;
+ virtual size_t total(int i=-1) const;
+ virtual int type(int i=-1) const;
+ virtual int depth(int i=-1) const;
+ virtual int channels(int i=-1) const;
+ virtual bool empty() const;
+
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~_InputArray();
+#endif
+
+ int flags; //!< kind and flag bits describing the wrapped container
+ void* obj; //!< untyped pointer to the wrapped container
+ Size sz;
+};
+
+
+// Depth bit masks: each constant sets bit (1 << depth_code), so several
+// allowed depths can be OR-ed together into a single mask.
+enum
+{
+ DEPTH_MASK_8U = 1 << CV_8U,
+ DEPTH_MASK_8S = 1 << CV_8S,
+ DEPTH_MASK_16U = 1 << CV_16U,
+ DEPTH_MASK_16S = 1 << CV_16S,
+ DEPTH_MASK_32S = 1 << CV_32S,
+ DEPTH_MASK_32F = 1 << CV_32F,
+ DEPTH_MASK_64F = 1 << CV_64F,
+ DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1,
+ DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S,
+ DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F
+};
+
+
+/*!
+ Proxy datatype for passing Mat's and vector<>'s as output parameters
+ */
+class CV_EXPORTS _OutputArray : public _InputArray
+{
+public:
+ _OutputArray();
+
+ //! wrap a writable container; the callee may (re)allocate it via create()
+ _OutputArray(Mat& m);
+ template<typename _Tp> _OutputArray(vector<_Tp>& vec);
+ template<typename _Tp> _OutputArray(vector<vector<_Tp> >& vec);
+ _OutputArray(vector<Mat>& vec);
+ template<typename _Tp> _OutputArray(vector<Mat_<_Tp> >& vec);
+ template<typename _Tp> _OutputArray(Mat_<_Tp>& m);
+ template<typename _Tp, int m, int n> _OutputArray(Matx<_Tp, m, n>& matx);
+ template<typename _Tp> _OutputArray(_Tp* vec, int n);
+ _OutputArray(gpu::GpuMat& d_mat);
+ _OutputArray(ogl::Buffer& buf);
+ _OutputArray(ogl::Texture2D& tex);
+
+ // const overloads — NOTE(review): presumably mark the destination as
+ // fixed-size/fixed-type (cf. fixedSize()/fixedType() below); confirm in
+ // the implementation.
+ _OutputArray(const Mat& m);
+ template<typename _Tp> _OutputArray(const vector<_Tp>& vec);
+ template<typename _Tp> _OutputArray(const vector<vector<_Tp> >& vec);
+ _OutputArray(const vector<Mat>& vec);
+ template<typename _Tp> _OutputArray(const vector<Mat_<_Tp> >& vec);
+ template<typename _Tp> _OutputArray(const Mat_<_Tp>& m);
+ template<typename _Tp, int m, int n> _OutputArray(const Matx<_Tp, m, n>& matx);
+ template<typename _Tp> _OutputArray(const _Tp* vec, int n);
+ _OutputArray(const gpu::GpuMat& d_mat);
+ _OutputArray(const ogl::Buffer& buf);
+ _OutputArray(const ogl::Texture2D& tex);
+
+ //! true when the wrapped destination cannot be resized / retyped
+ virtual bool fixedSize() const;
+ virtual bool fixedType() const;
+ //! whether the caller actually requested this output
+ virtual bool needed() const;
+ virtual Mat& getMatRef(int i=-1) const;
+ /*virtual*/ gpu::GpuMat& getGpuMatRef() const;
+ /*virtual*/ ogl::Buffer& getOGlBufferRef() const;
+ /*virtual*/ ogl::Texture2D& getOGlTexture2DRef() const;
+ //! (re)allocates the destination; fixedDepthMask uses the DEPTH_MASK_* bits above
+ virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
+ virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
+ virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
+ virtual void release() const;
+ virtual void clear() const;
+
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~_OutputArray();
+#endif
+};
+
+// Convenience aliases: array arguments are passed by const reference to the
+// proxy classes above.
+typedef const _InputArray& InputArray;
+typedef InputArray InputArrayOfArrays;
+typedef const _OutputArray& OutputArray;
+typedef OutputArray OutputArrayOfArrays;
+typedef OutputArray InputOutputArray;
+typedef OutputArray InputOutputArrayOfArrays;
+
+//! empty array placeholder; used below as the default for optional arguments (e.g. mask=noArray())
+CV_EXPORTS OutputArray noArray();
+
+/////////////////////////////////////// Mat ///////////////////////////////////////////
+
+// Bit-field masks for Mat::flags (magic signature / type / depth fields).
+enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 };
+
+//! returns the element size in bytes for the given matrix type (thin wrapper over CV_ELEM_SIZE)
+static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); }
+
+/*!
+ Custom array allocator
+
+ Derive from this interface and assign an instance to Mat::allocator to
+ control how matrix data is allocated and released.
+*/
+class CV_EXPORTS MatAllocator
+{
+public:
+ MatAllocator() {}
+ virtual ~MatAllocator() {}
+ //! allocates storage for a dims-dimensional array of the given type;
+ //! must fill refcount, datastart, data and the per-dimension step array
+ virtual void allocate(int dims, const int* sizes, int type, int*& refcount,
+ uchar*& datastart, uchar*& data, size_t* step) = 0;
+ //! releases storage previously obtained from allocate()
+ virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0;
+};
+
+/*!
+ The n-dimensional matrix class.
+
+ The class represents an n-dimensional dense numerical array that can act as
+ a matrix, image, optical flow map, 3-focal tensor etc.
+ It is very similar to CvMat and CvMatND types from earlier versions of OpenCV,
+ and similarly to those types, the matrix can be multi-channel. It also fully supports ROI mechanism.
+
+ There are many different ways to create a cv::Mat object. Here are some of the popular ones:
+ <ul>
+ <li> using cv::Mat::create(nrows, ncols, type) method or
+ the similar constructor cv::Mat::Mat(nrows, ncols, type[, fill_value]) constructor.
+ A new matrix of the specified size and specified type will be allocated.
+ "type" has the same meaning as in cvCreateMat function,
+ e.g. CV_8UC1 means 8-bit single-channel matrix, CV_32FC2 means 2-channel (i.e. complex)
+ floating-point matrix etc:
+
+ \code
+ // make 7x7 complex matrix filled with 1+3j.
+ cv::Mat M(7,7,CV_32FC2,Scalar(1,3));
+ // and now turn M to 100x60 15-channel 8-bit matrix.
+ // The old content will be deallocated
+ M.create(100,60,CV_8UC(15));
+ \endcode
+
+ As noted in the introduction of this chapter, Mat::create()
+ will only allocate a new matrix when the current matrix dimensionality
+ or type are different from the specified.
+
+ <li> by using a copy constructor or assignment operator, where on the right side it can
+ be a matrix or expression, see below. Again, as noted in the introduction,
+ matrix assignment is O(1) operation because it only copies the header
+ and increases the reference counter. cv::Mat::clone() method can be used to get a full
+ (a.k.a. deep) copy of the matrix when you need it.
+
+ <li> by constructing a header for a part of another matrix. It can be a single row, single column,
+ several rows, several columns, rectangular region in the matrix (called a minor in algebra) or
+ a diagonal. Such operations are also O(1), because the new header will reference the same data.
+ You can actually modify a part of the matrix using this feature, e.g.
+
+ \code
+ // add 5-th row, multiplied by 3 to the 3rd row
+ M.row(3) = M.row(3) + M.row(5)*3;
+
+ // now copy 7-th column to the 1-st column
+ // M.col(1) = M.col(7); // this will not work
+ Mat M1 = M.col(1);
+ M.col(7).copyTo(M1);
+
+ // create new 320x240 image
+ cv::Mat img(Size(320,240),CV_8UC3);
+ // select a roi
+ cv::Mat roi(img, Rect(10,10,100,100));
+ // fill the ROI with (0,255,0) (which is green in RGB space);
+ // the original 320x240 image will be modified
+ roi = Scalar(0,255,0);
+ \endcode
+
+ Thanks to the additional cv::Mat::datastart and cv::Mat::dataend members, it is possible to
+ compute the relative sub-matrix position in the main "container" matrix using cv::Mat::locateROI():
+
+ \code
+ Mat A = Mat::eye(10, 10, CV_32S);
+ // extracts A columns, 1 (inclusive) to 3 (exclusive).
+ Mat B = A(Range::all(), Range(1, 3));
+ // extracts B rows, 5 (inclusive) to 9 (exclusive).
+ // that is, C ~ A(Range(5, 9), Range(1, 3))
+ Mat C = B(Range(5, 9), Range::all());
+ Size size; Point ofs;
+ C.locateROI(size, ofs);
+ // size will be (width=10,height=10) and the ofs will be (x=1, y=5)
+ \endcode
+
+ As in the case of whole matrices, if you need a deep copy, use cv::Mat::clone() method
+ of the extracted sub-matrices.
+
+ <li> by making a header for user-allocated-data. It can be useful for
+ <ol>
+ <li> processing "foreign" data using OpenCV (e.g. when you implement
+ a DirectShow filter or a processing module for gstreamer etc.), e.g.
+
+ \code
+ void process_video_frame(const unsigned char* pixels,
+ int width, int height, int step)
+ {
+ cv::Mat img(height, width, CV_8UC3, pixels, step);
+ cv::GaussianBlur(img, img, cv::Size(7,7), 1.5, 1.5);
+ }
+ \endcode
+
+ <li> for quick initialization of small matrices and/or super-fast element access
+
+ \code
+ double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}};
+ cv::Mat M = cv::Mat(3, 3, CV_64F, m).inv();
+ \endcode
+ </ol>
+
+ partial yet very common cases of this "user-allocated data" case are conversions
+ from CvMat and IplImage to cv::Mat. For this purpose there are special constructors
+ taking pointers to CvMat or IplImage and the optional
+ flag indicating whether to copy the data or not.
+
+ Backward conversion from cv::Mat to CvMat or IplImage is provided via cast operators
+ cv::Mat::operator CvMat() and cv::Mat::operator IplImage().
+ The operators do not copy the data.
+
+
+ \code
+ IplImage* img = cvLoadImage("greatwave.jpg", 1);
+ Mat mtx(img); // convert IplImage* -> cv::Mat
+ CvMat oldmat = mtx; // convert cv::Mat -> CvMat
+ CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height &&
+ oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep);
+ \endcode
+
+ <li> by using MATLAB-style matrix initializers, cv::Mat::zeros(), cv::Mat::ones(), cv::Mat::eye(), e.g.:
+
+ \code
+ // create a double-precision identity matrix and add it to M.
+ M += Mat::eye(M.rows, M.cols, CV_64F);
+ \endcode
+
+ <li> by using comma-separated initializer:
+
+ \code
+ // create 3x3 double-precision identity matrix
+ Mat M = (Mat_<double>(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1);
+ \endcode
+
+ here we first call constructor of cv::Mat_ class (that we describe further) with the proper matrix,
+ and then we just put "<<" operator followed by comma-separated values that can be constants,
+ variables, expressions etc. Also, note the extra parentheses that are needed to avoid compiler errors.
+
+ </ul>
+
+ Once matrix is created, it will be automatically managed by using reference-counting mechanism
+ (unless the matrix header is built on top of user-allocated data,
+ in which case you should handle the data by yourself).
+ The matrix data will be deallocated when no one points to it;
+ if you want to release the data pointed by a matrix header before the matrix destructor is called,
+ use cv::Mat::release().
+
+ The next important thing to learn about the matrix class is element access. Here is how the matrix is stored.
+ The elements are stored in row-major order (row by row). The cv::Mat::data member points to the first element of the first row,
+ cv::Mat::rows contains the number of matrix rows and cv::Mat::cols - the number of matrix columns. There is yet another member,
+ cv::Mat::step that is used to actually compute address of a matrix element. cv::Mat::step is needed because the matrix can be
+ a part of another matrix or because there can be some padding space in the end of each row for a proper alignment.
+
+ Given these parameters, address of the matrix element M_{ij} is computed as following:
+
+ addr(M_{ij})=M.data + M.step*i + j*M.elemSize()
+
+ if you know the matrix element type, e.g. it is float, then you can use cv::Mat::at() method:
+
+ addr(M_{ij})=&M.at<float>(i,j)
+
+ (where & is used to convert the reference returned by cv::Mat::at() to a pointer).
+ if you need to process a whole row of matrix, the most efficient way is to get
+ the pointer to the row first, and then just use plain C operator []:
+
+ \code
+ // compute sum of positive matrix elements
+ // (assuming that M is double-precision matrix)
+ double sum=0;
+ for(int i = 0; i < M.rows; i++)
+ {
+ const double* Mi = M.ptr<double>(i);
+ for(int j = 0; j < M.cols; j++)
+ sum += std::max(Mi[j], 0.);
+ }
+ \endcode
+
+ Some operations, like the above one, do not actually depend on the matrix shape,
+ they just process elements of a matrix one by one (or elements from multiple matrices
+ that are sitting in the same place, e.g. matrix addition). Such operations are called
+ element-wise and it makes sense to check whether all the input/output matrices are continuous,
+ i.e. have no gaps in the end of each row, and if yes, process them as a single long row:
+
+ \code
+ // compute sum of positive matrix elements, optimized variant
+ double sum=0;
+ int cols = M.cols, rows = M.rows;
+ if(M.isContinuous())
+ {
+ cols *= rows;
+ rows = 1;
+ }
+ for(int i = 0; i < rows; i++)
+ {
+ const double* Mi = M.ptr<double>(i);
+ for(int j = 0; j < cols; j++)
+ sum += std::max(Mi[j], 0.);
+ }
+ \endcode
+ in the case of continuous matrix the outer loop body will be executed just once,
+ so the overhead will be smaller, which will be especially noticeable in the case of small matrices.
+
+ Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows:
+ \code
+ // compute sum of positive matrix elements, iterator-based variant
+ double sum=0;
+ MatConstIterator_<double> it = M.begin<double>(), it_end = M.end<double>();
+ for(; it != it_end; ++it)
+ sum += std::max(*it, 0.);
+ \endcode
+
+ The matrix iterators are random-access iterators, so they can be passed
+ to any STL algorithm, including std::sort().
+*/
+class CV_EXPORTS Mat
+{
+public:
+ //! default constructor
+ Mat();
+ //! constructs 2D matrix of the specified size and type
+ // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
+ Mat(int rows, int cols, int type);
+ Mat(Size size, int type);
+ //! constructs 2D matrix and fills it with the specified value _s.
+ Mat(int rows, int cols, int type, const Scalar& s);
+ Mat(Size size, int type, const Scalar& s);
+
+ //! constructs n-dimensional matrix
+ Mat(int ndims, const int* sizes, int type);
+ Mat(int ndims, const int* sizes, int type, const Scalar& s);
+
+ //! copy constructor
+ Mat(const Mat& m);
+ //! constructor for matrix headers pointing to user-allocated data
+ Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP);
+ Mat(Size size, int type, void* data, size_t step=AUTO_STEP);
+ Mat(int ndims, const int* sizes, int type, void* data, const size_t* steps=0);
+
+ //! creates a matrix header for a part of the bigger matrix
+ Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all());
+ Mat(const Mat& m, const Rect& roi);
+ Mat(const Mat& m, const Range* ranges);
+ //! converts old-style CvMat to the new matrix; the data is not copied by default
+ Mat(const CvMat* m, bool copyData=false);
+ //! converts old-style CvMatND to the new matrix; the data is not copied by default
+ Mat(const CvMatND* m, bool copyData=false);
+ //! converts old-style IplImage to the new matrix; the data is not copied by default
+ Mat(const IplImage* img, bool copyData=false);
+ //! builds matrix from std::vector with or without copying the data
+ template<typename _Tp> explicit Mat(const vector<_Tp>& vec, bool copyData=false);
+ //! builds matrix from cv::Vec; the data is copied by default
+ template<typename _Tp, int n> explicit Mat(const Vec<_Tp, n>& vec, bool copyData=true);
+ //! builds matrix from cv::Matx; the data is copied by default
+ template<typename _Tp, int m, int n> explicit Mat(const Matx<_Tp, m, n>& mtx, bool copyData=true);
+ //! builds matrix from a 2D point
+ template<typename _Tp> explicit Mat(const Point_<_Tp>& pt, bool copyData=true);
+ //! builds matrix from a 3D point
+ template<typename _Tp> explicit Mat(const Point3_<_Tp>& pt, bool copyData=true);
+ //! builds matrix from comma initializer
+ template<typename _Tp> explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer);
+
+ //! download data from GpuMat
+ explicit Mat(const gpu::GpuMat& m);
+
+ //! destructor - calls release()
+ ~Mat();
+ //! assignment operators
+ Mat& operator = (const Mat& m);
+ Mat& operator = (const MatExpr& expr);
+
+ //! returns a new matrix header for the specified row
+ Mat row(int y) const;
+ //! returns a new matrix header for the specified column
+ Mat col(int x) const;
+ //! ... for the specified row span
+ Mat rowRange(int startrow, int endrow) const;
+ Mat rowRange(const Range& r) const;
+ //! ... for the specified column span
+ Mat colRange(int startcol, int endcol) const;
+ Mat colRange(const Range& r) const;
+ //! ... for the specified diagonal
+ // (d=0 - the main diagonal,
+ // >0 - a diagonal from the lower half,
+ // <0 - a diagonal from the upper half)
+ Mat diag(int d=0) const;
+ //! constructs a square diagonal matrix which main diagonal is vector "d"
+ static Mat diag(const Mat& d);
+
+ //! returns deep copy of the matrix, i.e. the data is copied
+ Mat clone() const;
+ //! copies the matrix content to "m".
+ // It calls m.create(this->size(), this->type()).
+ void copyTo( OutputArray m ) const;
+ //! copies those matrix elements to "m" that are marked with non-zero mask elements.
+ void copyTo( OutputArray m, InputArray mask ) const;
+ //! converts matrix to another datatype with optional scaling. See cvConvertScale.
+ void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const;
+
+ //! copies *this to "m", converting to the given type (type=-1 presumably keeps m's type — confirm in implementation)
+ void assignTo( Mat& m, int type=-1 ) const;
+
+ //! sets every matrix element to s
+ Mat& operator = (const Scalar& s);
+ //! sets some of the matrix elements to s, according to the mask
+ Mat& setTo(InputArray value, InputArray mask=noArray());
+ //! creates alternative matrix header for the same data, with different
+ // number of channels and/or different number of rows. see cvReshape.
+ Mat reshape(int cn, int rows=0) const;
+ Mat reshape(int cn, int newndims, const int* newsz) const;
+
+ //! matrix transposition by means of matrix expressions
+ MatExpr t() const;
+ //! matrix inversion by means of matrix expressions
+ MatExpr inv(int method=DECOMP_LU) const;
+ //! per-element matrix multiplication by means of matrix expressions
+ MatExpr mul(InputArray m, double scale=1) const;
+
+ //! computes cross-product of 2 3D vectors
+ Mat cross(InputArray m) const;
+ //! computes dot-product
+ double dot(InputArray m) const;
+
+ //! Matlab-style matrix initialization
+ static MatExpr zeros(int rows, int cols, int type);
+ static MatExpr zeros(Size size, int type);
+ static MatExpr zeros(int ndims, const int* sz, int type);
+ static MatExpr ones(int rows, int cols, int type);
+ static MatExpr ones(Size size, int type);
+ static MatExpr ones(int ndims, const int* sz, int type);
+ static MatExpr eye(int rows, int cols, int type);
+ static MatExpr eye(Size size, int type);
+
+ //! allocates new matrix data unless the matrix already has specified size and type.
+ // previous data is unreferenced if needed.
+ void create(int rows, int cols, int type);
+ void create(Size size, int type);
+ void create(int ndims, const int* sizes, int type);
+
+ //! increases the reference counter; use with care to avoid memleaks
+ void addref();
+ //! decreases reference counter;
+ // deallocates the data when reference counter reaches 0.
+ void release();
+
+ //! deallocates the matrix data
+ void deallocate();
+ //! internal use function; properly re-allocates _size, _step arrays
+ void copySize(const Mat& m);
+
+ //! reserves enough space to fit sz hyper-planes
+ void reserve(size_t sz);
+ //! resizes matrix to the specified number of hyper-planes
+ void resize(size_t sz);
+ //! resizes matrix to the specified number of hyper-planes; initializes the newly added elements
+ void resize(size_t sz, const Scalar& s);
+ //! internal function
+ void push_back_(const void* elem);
+ //! adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat)
+ template<typename _Tp> void push_back(const _Tp& elem);
+ template<typename _Tp> void push_back(const Mat_<_Tp>& elem);
+ void push_back(const Mat& m);
+ //! removes several hyper-planes from bottom of the matrix
+ void pop_back(size_t nelems=1);
+
+ //! locates matrix header within a parent matrix. See below
+ void locateROI( Size& wholeSize, Point& ofs ) const;
+ //! moves/resizes the current matrix ROI inside the parent matrix.
+ Mat& adjustROI( int dtop, int dbottom, int dleft, int dright );
+ //! extracts a rectangular sub-matrix
+ // (this is a generalized form of row, rowRange etc.)
+ Mat operator()( Range rowRange, Range colRange ) const;
+ Mat operator()( const Rect& roi ) const;
+ Mat operator()( const Range* ranges ) const;
+
+ //! converts header to CvMat; no data is copied
+ operator CvMat() const;
+ //! converts header to CvMatND; no data is copied
+ operator CvMatND() const;
+ //! converts header to IplImage; no data is copied
+ operator IplImage() const;
+
+ template<typename _Tp> operator vector<_Tp>() const;
+ template<typename _Tp, int n> operator Vec<_Tp, n>() const;
+ template<typename _Tp, int m, int n> operator Matx<_Tp, m, n>() const;
+
+ //! returns true iff the matrix data is continuous
+ // (i.e. when there are no gaps between successive rows).
+ // similar to CV_IS_MAT_CONT(cvmat->type)
+ bool isContinuous() const;
+
+ //! returns true if the matrix is a submatrix of another matrix
+ bool isSubmatrix() const;
+
+ //! returns element size in bytes,
+ // similar to CV_ELEM_SIZE(cvmat->type)
+ size_t elemSize() const;
+ //! returns the size of element channel in bytes.
+ size_t elemSize1() const;
+ //! returns element type, similar to CV_MAT_TYPE(cvmat->type)
+ int type() const;
+ //! returns element type, similar to CV_MAT_DEPTH(cvmat->type)
+ int depth() const;
+ //! returns element type, similar to CV_MAT_CN(cvmat->type)
+ int channels() const;
+ //! returns step/elemSize1()
+ size_t step1(int i=0) const;
+ //! returns true if matrix data is NULL
+ bool empty() const;
+ //! returns the total number of matrix elements
+ size_t total() const;
+
+ //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise
+ int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const;
+
+ //! returns pointer to i0-th submatrix along the dimension #0
+ uchar* ptr(int i0=0);
+ const uchar* ptr(int i0=0) const;
+
+ //! returns pointer to (i0,i1) submatrix along the dimensions #0 and #1
+ uchar* ptr(int i0, int i1);
+ const uchar* ptr(int i0, int i1) const;
+
+ //! returns pointer to (i0,i1,i2) submatrix along the dimensions #0, #1, #2
+ uchar* ptr(int i0, int i1, int i2);
+ const uchar* ptr(int i0, int i1, int i2) const;
+
+ //! returns pointer to the matrix element
+ uchar* ptr(const int* idx);
+ //! returns read-only pointer to the matrix element
+ const uchar* ptr(const int* idx) const;
+
+ template<int n> uchar* ptr(const Vec<int, n>& idx);
+ template<int n> const uchar* ptr(const Vec<int, n>& idx) const;
+
+ //! template version of the above method
+ template<typename _Tp> _Tp* ptr(int i0=0);
+ template<typename _Tp> const _Tp* ptr(int i0=0) const;
+
+ template<typename _Tp> _Tp* ptr(int i0, int i1);
+ template<typename _Tp> const _Tp* ptr(int i0, int i1) const;
+
+ template<typename _Tp> _Tp* ptr(int i0, int i1, int i2);
+ template<typename _Tp> const _Tp* ptr(int i0, int i1, int i2) const;
+
+ template<typename _Tp> _Tp* ptr(const int* idx);
+ template<typename _Tp> const _Tp* ptr(const int* idx) const;
+
+ template<typename _Tp, int n> _Tp* ptr(const Vec<int, n>& idx);
+ template<typename _Tp, int n> const _Tp* ptr(const Vec<int, n>& idx) const;
+
+ //! the same as above, with the pointer dereferencing
+ template<typename _Tp> _Tp& at(int i0=0);
+ template<typename _Tp> const _Tp& at(int i0=0) const;
+
+ template<typename _Tp> _Tp& at(int i0, int i1);
+ template<typename _Tp> const _Tp& at(int i0, int i1) const;
+
+ template<typename _Tp> _Tp& at(int i0, int i1, int i2);
+ template<typename _Tp> const _Tp& at(int i0, int i1, int i2) const;
+
+ template<typename _Tp> _Tp& at(const int* idx);
+ template<typename _Tp> const _Tp& at(const int* idx) const;
+
+ template<typename _Tp, int n> _Tp& at(const Vec<int, n>& idx);
+ template<typename _Tp, int n> const _Tp& at(const Vec<int, n>& idx) const;
+
+ //! special versions for 2D arrays (especially convenient for referencing image pixels)
+ template<typename _Tp> _Tp& at(Point pt);
+ template<typename _Tp> const _Tp& at(Point pt) const;
+
+ //! template methods for iteration over matrix elements.
+ // the iterators take care of skipping gaps in the end of rows (if any)
+ template<typename _Tp> MatIterator_<_Tp> begin();
+ template<typename _Tp> MatIterator_<_Tp> end();
+ template<typename _Tp> MatConstIterator_<_Tp> begin() const;
+ template<typename _Tp> MatConstIterator_<_Tp> end() const;
+
+ enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG };
+
+ /*! includes several bit-fields:
+ - the magic signature
+ - continuity flag
+ - depth
+ - number of channels
+ */
+ int flags;
+ //! the matrix dimensionality, >= 2
+ int dims;
+ //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions
+ int rows, cols;
+ //! pointer to the data
+ uchar* data;
+
+ //! pointer to the reference counter;
+ // when matrix points to user-allocated data, the pointer is NULL
+ int* refcount;
+
+ //! helper fields used in locateROI and adjustROI
+ uchar* datastart;
+ uchar* dataend;
+ uchar* datalimit;
+
+ //! custom allocator
+ MatAllocator* allocator;
+
+ //! lightweight proxy over the array of per-dimension sizes
+ struct CV_EXPORTS MSize
+ {
+ MSize(int* _p);
+ Size operator()() const;
+ const int& operator[](int i) const;
+ int& operator[](int i);
+ operator const int*() const;
+ bool operator == (const MSize& sz) const;
+ bool operator != (const MSize& sz) const;
+
+ int* p;
+ };
+
+ //! lightweight proxy over the array of per-dimension steps
+ // (steps are in bytes; cf. the addressing formula in the class comment)
+ struct CV_EXPORTS MStep
+ {
+ MStep();
+ MStep(size_t s);
+ const size_t& operator[](int i) const;
+ size_t& operator[](int i);
+ operator size_t() const;
+ MStep& operator = (size_t s);
+
+ size_t* p;
+ size_t buf[2];
+ protected:
+ MStep& operator = (const MStep&);
+ };
+
+ MSize size;
+ MStep step;
+
+protected:
+ void initEmpty();
+};
+
+
+/*!
+ Random Number Generator
+
+ The class implements RNG using Multiply-with-Carry algorithm
+*/
+class CV_EXPORTS RNG
+{
+public:
+ enum { UNIFORM=0, NORMAL=1 };
+
+ RNG();
+ RNG(uint64 state);
+ //! updates the state and returns the next 32-bit unsigned integer random number
+ unsigned next();
+
+ // conversion operators: return the next random value converted to the
+ // corresponding type (NOTE(review): presumably advance the state via
+ // next() — confirm in the implementation)
+ operator uchar();
+ operator schar();
+ operator ushort();
+ operator short();
+ operator unsigned();
+ //! returns a random integer sampled uniformly from [0, N).
+ unsigned operator ()(unsigned N);
+ unsigned operator ()();
+ operator int();
+ operator float();
+ operator double();
+ //! returns uniformly distributed integer random number from [a,b) range
+ int uniform(int a, int b);
+ //! returns uniformly distributed floating-point random number from [a,b) range
+ float uniform(float a, float b);
+ //! returns uniformly distributed double-precision floating-point random number from [a,b) range
+ double uniform(double a, double b);
+ //! fills "mat" with random values; distType is UNIFORM or NORMAL, a/b are the distribution parameters
+ void fill( InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange=false );
+ //! returns Gaussian random variate with mean zero.
+ double gaussian(double sigma);
+
+ //! the current 64-bit generator state
+ uint64 state;
+};
+
+/*!
+ Random Number Generator - MT
+
+ The class implements RNG using the Mersenne Twister algorithm
+*/
+class CV_EXPORTS RNG_MT19937
+{
+public:
+ RNG_MT19937();
+ RNG_MT19937(unsigned s);
+ //! re-seeds the generator
+ void seed(unsigned s);
+
+ //! returns the next 32-bit random number and advances the state
+ unsigned next();
+
+ // conversion operators returning the next random value as the corresponding type
+ operator int();
+ operator unsigned();
+ operator float();
+ operator double();
+
+ //! returns a random integer sampled uniformly from [0, N)
+ unsigned operator ()(unsigned N);
+ unsigned operator ()();
+
+ //! returns uniformly distributed integer random number from [a,b) range
+ int uniform(int a, int b);
+ //! returns uniformly distributed floating-point random number from [a,b) range
+ float uniform(float a, float b);
+ //! returns uniformly distributed double-precision floating-point random number from [a,b) range
+ double uniform(double a, double b);
+
+private:
+ // standard MT19937 parameters: state vector length N and shift M
+ enum PeriodParameters {N = 624, M = 397};
+ unsigned state[N];
+ int mti;
+};
+
+/*!
+ Termination criteria in iterative algorithms
+ */
+class CV_EXPORTS TermCriteria
+{
+public:
+ enum
+ {
+ COUNT=1, //!< the maximum number of iterations or elements to compute
+ MAX_ITER=COUNT, //!< ditto
+ EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops
+ };
+
+ //! default constructor
+ TermCriteria();
+ //! full constructor
+ TermCriteria(int type, int maxCount, double epsilon);
+ //! conversion from CvTermCriteria
+ TermCriteria(const CvTermCriteria& criteria);
+ //! conversion to CvTermCriteria
+ operator CvTermCriteria() const;
+
+ int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS
+ int maxCount; //!< the maximum number of iterations/elements
+ double epsilon; //!< the desired accuracy
+};
+
+
+//! low-level element-wise binary operation over raw matrix data
+// (src1, src2 -> dst over a sz region; the trailing void* carries
+// operation-specific extra state)
+typedef void (*BinaryFunc)(const uchar* src1, size_t step1,
+ const uchar* src2, size_t step2,
+ uchar* dst, size_t step, Size sz,
+ void*);
+
+//! returns the low-level function converting elements from sdepth to ddepth
+CV_EXPORTS BinaryFunc getConvertFunc(int sdepth, int ddepth);
+//! same as above, but with scaling (cf. Mat::convertTo's alpha/beta)
+CV_EXPORTS BinaryFunc getConvertScaleFunc(int sdepth, int ddepth);
+//! returns the masked-copy function for the given element size esz
+CV_EXPORTS BinaryFunc getCopyMaskFunc(size_t esz);
+
+//! swaps two matrices
+CV_EXPORTS void swap(Mat& a, Mat& b);
+
+//! converts array (CvMat or IplImage) to cv::Mat
+CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false,
+ bool allowND=true, int coiMode=0);
+//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it.
+CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1);
+//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage
+CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1);
+
+//! adds one matrix to another (dst = src1 + src2)
+CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst,
+ InputArray mask=noArray(), int dtype=-1);
+//! subtracts one matrix from another (dst = src1 - src2)
+CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst,
+ InputArray mask=noArray(), int dtype=-1);
+
+//! computes element-wise weighted product of the two arrays (dst = scale*src1*src2)
+CV_EXPORTS_W void multiply(InputArray src1, InputArray src2,
+ OutputArray dst, double scale=1, int dtype=-1);
+
+//! computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2)
+CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst,
+ double scale=1, int dtype=-1);
+
+//! computes element-wise weighted reciprocal of an array (dst = scale/src2)
+CV_EXPORTS_W void divide(double scale, InputArray src2,
+ OutputArray dst, int dtype=-1);
+
+//! adds scaled array to another one (dst = alpha*src1 + src2)
+CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst);
+
+//! computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
+CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2,
+ double beta, double gamma, OutputArray dst, int dtype=-1);
+
+//! scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i)=saturate_cast<uchar>abs(src(i)*alpha+beta)
+CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst,
+ double alpha=1, double beta=0);
+//! transforms array of numbers using a lookup table: dst(i)=lut(src(i))
+CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst,
+ int interpolation=0);
+
+//! computes sum of array elements
+CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src);
+//! computes the number of nonzero array elements
+CV_EXPORTS_W int countNonZero( InputArray src );
+//! returns the list of locations of non-zero pixels
+CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
+
+//! computes mean value of selected array elements
+CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray());
+//! computes mean value and standard deviation of all or selected array elements
+CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev,
+ InputArray mask=noArray());
+//! computes norm of the selected array part
+CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray());
+//! computes norm of selected part of the difference between two arrays
+CV_EXPORTS_W double norm(InputArray src1, InputArray src2,
+ int normType=NORM_L2, InputArray mask=noArray());
+
+//! naive nearest neighbor finder
+CV_EXPORTS_W void batchDistance(InputArray src1, InputArray src2,
+ OutputArray dist, int dtype, OutputArray nidx,
+ int normType=NORM_L2, int K=0,
+ InputArray mask=noArray(), int update=0,
+ bool crosscheck=false);
+
+//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
+CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0,
+ int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray());
+
+//! finds global minimum and maximum array elements and returns their values and their locations
+CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal,
+ CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0,
+ CV_OUT Point* maxLoc=0, InputArray mask=noArray());
+CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal,
+ int* minIdx=0, int* maxIdx=0, InputArray mask=noArray());
+
+//! transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows
+CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1);
+
+//! makes multi-channel array out of several single-channel arrays
+CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst);
+CV_EXPORTS void merge(const vector<Mat>& mv, OutputArray dst );
+
+//! makes multi-channel array out of several single-channel arrays
+CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst);
+
+//! copies each plane of a multi-channel array to a dedicated array
+CV_EXPORTS void split(const Mat& src, Mat* mvbegin);
+CV_EXPORTS void split(const Mat& m, vector<Mat>& mv );
+
+//! copies each plane of a multi-channel array to a dedicated array
+CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv);
+
+//! copies selected channels from the input arrays to the selected channels of the output arrays
+CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts,
+ const int* fromTo, size_t npairs);
+CV_EXPORTS void mixChannels(const vector<Mat>& src, vector<Mat>& dst,
+ const int* fromTo, size_t npairs);
+CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst,
+ const vector<int>& fromTo);
+
+//! extracts a single channel from src (coi is 0-based index)
+CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi);
+
+//! inserts a single channel to dst (coi is 0-based index)
+CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi);
+
+//! reverses the order of the rows, columns or both in a matrix
+CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode);
+
+//! replicates the input matrix the specified number of times in the horizontal and/or vertical direction
+CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst);
+CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx);
+
+//! concatenates matrices horizontally (all inputs must have the same number of rows and the same type)
+CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst);
+CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst);
+CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst);
+
+//! concatenates matrices vertically (all inputs must have the same number of columns and the same type)
+CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst);
+CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst);
+CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);
+
+//! computes bitwise conjunction of the two arrays (dst = src1 & src2)
+CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2,
+ OutputArray dst, InputArray mask=noArray());
+//! computes bitwise disjunction of the two arrays (dst = src1 | src2)
+CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2,
+ OutputArray dst, InputArray mask=noArray());
+//! computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2)
+CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2,
+ OutputArray dst, InputArray mask=noArray());
+//! inverts each bit of array (dst = ~src)
+CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst,
+ InputArray mask=noArray());
+//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2))
+CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst);
+//! set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb)
+CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb,
+ InputArray upperb, OutputArray dst);
+//! compares elements of two arrays (dst = src1 \<cmpop\> src2)
+CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop);
+//! computes per-element minimum of two arrays (dst = min(src1, src2))
+CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst);
+//! computes per-element maximum of two arrays (dst = max(src1, src2))
+CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst);
+
+//! computes per-element minimum of two arrays (dst = min(src1, src2))
+CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst);
+//! computes per-element minimum of array and scalar (dst = min(src1, src2))
+CV_EXPORTS void min(const Mat& src1, double src2, Mat& dst);
+//! computes per-element maximum of two arrays (dst = max(src1, src2))
+CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst);
+//! computes per-element maximum of array and scalar (dst = max(src1, src2))
+CV_EXPORTS void max(const Mat& src1, double src2, Mat& dst);
+
+//! computes square root of each matrix element (dst = src**0.5)
+CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst);
+//! raises the input matrix elements to the specified power (b = a**power)
+CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst);
+//! computes exponent of each matrix element (dst = e**src)
+CV_EXPORTS_W void exp(InputArray src, OutputArray dst);
+//! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src))
+CV_EXPORTS_W void log(InputArray src, OutputArray dst);
+//! computes cube root of the argument
+CV_EXPORTS_W float cubeRoot(float val);
+//! computes the angle in degrees (0..360) of the vector (x,y)
+CV_EXPORTS_W float fastAtan2(float y, float x);
+
+CV_EXPORTS void exp(const float* src, float* dst, int n);
+CV_EXPORTS void log(const float* src, float* dst, int n);
+CV_EXPORTS void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees);
+CV_EXPORTS void magnitude(const float* x, const float* y, float* dst, int n);
+
+//! converts polar coordinates to Cartesian
+CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle,
+ OutputArray x, OutputArray y, bool angleInDegrees=false);
+//! converts Cartesian coordinates to polar
+CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y,
+ OutputArray magnitude, OutputArray angle,
+ bool angleInDegrees=false);
+//! computes angle (angle(i)) of each (x(i), y(i)) vector
+CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle,
+ bool angleInDegrees=false);
+//! computes magnitude (magnitude(i)) of each (x(i), y(i)) vector
+CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude);
+//! checks that each matrix element is within the specified range.
+CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pos=0,
+ double minVal=-DBL_MAX, double maxVal=DBL_MAX);
+//! converts NaN's to the given number
+CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val=0);
+
+//! implements generalized matrix product algorithm GEMM from BLAS
+CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
+ InputArray src3, double beta, OutputArray dst, int flags=0);
+//! multiplies matrix by its transposition from the left or from the right
+CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa,
+ InputArray delta=noArray(),
+ double scale=1, int dtype=-1 );
+//! transposes the matrix
+CV_EXPORTS_W void transpose(InputArray src, OutputArray dst);
+//! performs affine transformation of each element of multi-channel input matrix
+CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m );
+//! performs perspective transformation of each element of multi-channel input matrix
+CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m );
+
+//! extends the symmetrical matrix from the lower half or from the upper half
+CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper=false);
+//! initializes scaled identity matrix
+CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s=Scalar(1));
+//! computes determinant of a square matrix
+CV_EXPORTS_W double determinant(InputArray mtx);
+//! computes trace of a matrix
+CV_EXPORTS_W Scalar trace(InputArray mtx);
+//! computes inverse or pseudo-inverse matrix
+CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags=DECOMP_LU);
+//! solves linear system or a least-square problem
+CV_EXPORTS_W bool solve(InputArray src1, InputArray src2,
+ OutputArray dst, int flags=DECOMP_LU);
+
+//! flag values for sort() and sortIdx() (declared below); a row/column flag may be combined with an order flag
+enum
+{
+    SORT_EVERY_ROW=0,    // sort each matrix row independently
+    SORT_EVERY_COLUMN=1, // sort each matrix column independently
+    SORT_ASCENDING=0,    // sort in ascending order
+    SORT_DESCENDING=16   // sort in descending order
+};
+
+//! sorts independently each matrix row or each matrix column
+CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags);
+//! sorts independently each matrix row or each matrix column
+CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags);
+//! finds real roots of a cubic polynomial
+CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots);
+//! finds real and complex roots of a polynomial
+CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300);
+//! finds eigenvalues of a symmetric matrix
+CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1,
+ int highindex=-1);
+//! finds eigenvalues and eigenvectors of a symmetric matrix
+CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues,
+ OutputArray eigenvectors,
+ int lowindex=-1, int highindex=-1);
+CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors,
+ OutputArray eigenvalues, OutputArray eigenvectors);
+
+//! operation flags for calcCovarMatrix() (declared below); the power-of-two values may be combined with bitwise OR
+enum
+{
+    COVAR_SCRAMBLED=0,
+    COVAR_NORMAL=1,
+    COVAR_USE_AVG=2,
+    COVAR_SCALE=4,
+    COVAR_ROWS=8,  // each sample is stored as a matrix row
+    COVAR_COLS=16  // each sample is stored as a matrix column
+};
+
+//! computes covariation matrix of a set of samples
+CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean,
+ int flags, int ctype=CV_64F);
+//! computes covariation matrix of a set of samples
+CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar,
+ OutputArray mean, int flags, int ctype=CV_64F);
+
+/*!
+ Principal Component Analysis
+
+ The class PCA is used to compute the special basis for a set of vectors.
+ The basis will consist of eigenvectors of the covariance matrix computed
+ from the input set of vectors. After PCA is performed, vectors can be transformed from
+ the original high-dimensional space to the subspace formed by a few most
+ prominent eigenvectors (called the principal components),
+ corresponding to the largest eigenvalues of the covariation matrix.
+ Thus the dimensionality of the vector and the correlation between the coordinates is reduced.
+
+ The following sample is the function that takes two matrices. The first one stores the set
+ of vectors (a row per vector) that is used to compute PCA, the second one stores another
+ "test" set of vectors (a row per vector) that are first compressed with PCA,
+ then reconstructed back and then the reconstruction error norm is computed and printed for each vector.
+
+ \code
+ using namespace cv;
+
+ PCA compressPCA(const Mat& pcaset, int maxComponents,
+ const Mat& testset, Mat& compressed)
+ {
+ PCA pca(pcaset, // pass the data
+ Mat(), // we do not have a pre-computed mean vector,
+ // so let the PCA engine to compute it
+ CV_PCA_DATA_AS_ROW, // indicate that the vectors
+ // are stored as matrix rows
+ // (use CV_PCA_DATA_AS_COL if the vectors are
+ // the matrix columns)
+ maxComponents // specify, how many principal components to retain
+ );
+ // if there is no test data, just return the computed basis, ready-to-use
+ if( !testset.data )
+ return pca;
+ CV_Assert( testset.cols == pcaset.cols );
+
+ compressed.create(testset.rows, maxComponents, testset.type());
+
+ Mat reconstructed;
+ for( int i = 0; i < testset.rows; i++ )
+ {
+ Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed;
+ // compress the vector, the result will be stored
+ // in the i-th row of the output matrix
+ pca.project(vec, coeffs);
+ // and then reconstruct it
+ pca.backProject(coeffs, reconstructed);
+ // and measure the error
+ printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2));
+ }
+ return pca;
+ }
+ \endcode
+*/
+class CV_EXPORTS PCA
+{
+public:
+    //! default constructor
+    PCA();
+    //! the constructor that performs PCA
+    PCA(InputArray data, InputArray mean, int flags, int maxComponents=0);
+    //! the constructor that performs PCA; retainedVariance selects how much variance to keep instead of a fixed component count
+    PCA(InputArray data, InputArray mean, int flags, double retainedVariance);
+    //! operator that performs PCA. The previously stored data, if any, is released
+    PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0);
+    //! performs PCA; retainedVariance selects how much variance to keep instead of a fixed component count
+    PCA& computeVar(InputArray data, InputArray mean, int flags, double retainedVariance);
+    //! projects vector from the original space to the principal components subspace
+    Mat project(InputArray vec) const;
+    //! projects vector from the original space to the principal components subspace
+    void project(InputArray vec, OutputArray result) const;
+    //! reconstructs the original vector from the projection
+    Mat backProject(InputArray vec) const;
+    //! reconstructs the original vector from the projection
+    void backProject(InputArray vec, OutputArray result) const;
+
+    Mat eigenvectors; //!< eigenvectors of the covariation matrix
+    Mat eigenvalues; //!< eigenvalues of the covariation matrix
+    Mat mean; //!< mean value subtracted before the projection and added after the back projection
+};
+
+CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean,
+ OutputArray eigenvectors, int maxComponents=0);
+
+CV_EXPORTS_W void PCAComputeVar(InputArray data, CV_OUT InputOutputArray mean,
+ OutputArray eigenvectors, double retainedVariance);
+
+CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean,
+ InputArray eigenvectors, OutputArray result);
+
+CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean,
+ InputArray eigenvectors, OutputArray result);
+
+
+/*!
+ Singular Value Decomposition class
+
+ The class is used to compute Singular Value Decomposition of a floating-point matrix and then
+ use it to solve least-square problems, under-determined linear systems, invert matrices,
+ compute condition numbers etc.
+
+ For a bit faster operation you can pass flags=SVD::MODIFY_A|... to modify the decomposed matrix
+ when it is not necessary to preserve it. If you want to compute condition number of a matrix
+ or absolute value of its determinant - you do not need SVD::u or SVD::vt,
+ so you can pass flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that the full-size SVD::u and SVD::vt
+ must be computed, which is not necessary most of the time.
+*/
+class CV_EXPORTS SVD
+{
+public:
+    enum { MODIFY_A=1, NO_UV=2, FULL_UV=4 };
+    //! the default constructor
+    SVD();
+    //! the constructor that performs SVD
+    SVD( InputArray src, int flags=0 );
+    //! the operator that performs SVD. The previously allocated SVD::u, SVD::w and SVD::vt are released.
+    SVD& operator ()( InputArray src, int flags=0 );
+
+    //! decomposes matrix and stores the results to user-provided matrices
+    static void compute( InputArray src, OutputArray w,
+                         OutputArray u, OutputArray vt, int flags=0 );
+    //! computes singular values of a matrix
+    static void compute( InputArray src, OutputArray w, int flags=0 );
+    //! performs back substitution
+    static void backSubst( InputArray w, InputArray u,
+                           InputArray vt, InputArray rhs,
+                           OutputArray dst );
+
+    //! fixed-size Matx variants of compute() and backSubst()
+    template<typename _Tp, int m, int n, int nm> static void compute( const Matx<_Tp, m, n>& a,
+        Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt );
+    template<typename _Tp, int m, int n, int nm> static void compute( const Matx<_Tp, m, n>& a,
+        Matx<_Tp, nm, 1>& w );
+    template<typename _Tp, int m, int n, int nm, int nb> static void backSubst( const Matx<_Tp, nm, 1>& w,
+        const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst );
+
+    //! finds dst = arg min_{|dst|=1} |m*dst|
+    static void solveZ( InputArray src, OutputArray dst );
+    //! performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix
+    void backSubst( InputArray rhs, OutputArray dst ) const;
+
+    Mat u, w, vt; //!< decomposition results: u and vt hold the singular vectors, w the singular values
+};
+
+//! computes SVD of src
+CV_EXPORTS_W void SVDecomp( InputArray src, CV_OUT OutputArray w,
+ CV_OUT OutputArray u, CV_OUT OutputArray vt, int flags=0 );
+
+//! performs back substitution for the previously computed SVD
+CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt,
+ InputArray rhs, CV_OUT OutputArray dst );
+
+//! computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariation matrix
+CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar);
+//! a synonym for Mahalanobis
+CV_EXPORTS double Mahalonobis(InputArray v1, InputArray v2, InputArray icovar);
+
+//! performs forward or inverse 1D or 2D Discrete Fourier Transformation
+CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0);
+//! performs inverse 1D or 2D Discrete Fourier Transformation
+CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0);
+//! performs forward or inverse 1D or 2D Discrete Cosine Transformation
+CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags=0);
+//! performs inverse 1D or 2D Discrete Cosine Transformation
+CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags=0);
+//! computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication
+CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c,
+ int flags, bool conjB=false);
+//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently
+CV_EXPORTS_W int getOptimalDFTSize(int vecsize);
+
+/*!
+ Various k-Means flags
+*/
+enum
+{
+ KMEANS_RANDOM_CENTERS=0, // Chooses random centers for k-Means initialization
+ KMEANS_PP_CENTERS=2, // Uses k-Means++ algorithm for initialization
+ KMEANS_USE_INITIAL_LABELS=1 // Uses the user-provided labels for K-Means initialization
+};
+//! clusters the input data using k-Means algorithm
+CV_EXPORTS_W double kmeans( InputArray data, int K, CV_OUT InputOutputArray bestLabels,
+ TermCriteria criteria, int attempts,
+ int flags, OutputArray centers=noArray() );
+
+//! returns the thread-local Random number generator
+CV_EXPORTS RNG& theRNG();
+
+//! sets state of the thread-local Random number generator
+CV_EXPORTS_W void setRNGSeed(int seed);
+
+//! returns the next uniformly-distributed random number of the specified type
+template<typename _Tp> static inline _Tp randu() { return (_Tp)theRNG(); }
+
+//! fills array with uniformly-distributed random numbers from the range [low, high)
+CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high);
+
+//! fills array with normally-distributed random numbers with the specified mean and the standard deviation
+CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev);
+
+//! shuffles the input array elements
+CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng=0);
+CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.);
+
+//! draws the line segment (pt1, pt2) in the image
+CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color,
+ int thickness=1, int lineType=8, int shift=0);
+
+//! draws an arrow from pt1 to pt2 in the image
+CV_EXPORTS_W void arrowedLine(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color,
+ int thickness=1, int line_type=8, int shift=0, double tipLength=0.1);
+
+//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
+CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2,
+ const Scalar& color, int thickness=1,
+ int lineType=8, int shift=0);
+
+//! draws the rectangle outline or a solid rectangle covering rec in the image
+CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,
+ const Scalar& color, int thickness=1,
+ int lineType=8, int shift=0);
+
+//! draws the circle outline or a solid circle in the image
+CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius,
+ const Scalar& color, int thickness=1,
+ int lineType=8, int shift=0);
+
+//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image
+CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes,
+ double angle, double startAngle, double endAngle,
+ const Scalar& color, int thickness=1,
+ int lineType=8, int shift=0);
+
+//! draws a rotated ellipse in the image
+CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color,
+ int thickness=1, int lineType=8);
+
+/* ----------------------------------------------------------------------------------------- */
+/* ADDING A SET OF PREDEFINED MARKERS WHICH COULD BE USED TO HIGHLIGHT POSITIONS IN AN IMAGE */
+/* ----------------------------------------------------------------------------------------- */
+
+//! Possible set of marker types used for the drawMarker function
+enum MarkerTypes
+{
+ MARKER_CROSS = 0, // A crosshair marker shape
+ MARKER_TILTED_CROSS = 1, // A 45 degree tilted crosshair marker shape
+ MARKER_STAR = 2, // A star marker shape, combination of cross and tilted cross
+ MARKER_DIAMOND = 3, // A diamond marker shape
+ MARKER_SQUARE = 4, // A square marker shape
+ MARKER_TRIANGLE_UP = 5, // An upwards pointing triangle marker shape
+ MARKER_TRIANGLE_DOWN = 6 // A downwards pointing triangle marker shape
+};
+
+/** @brief Draws a marker on a predefined position in an image.
+
+The function drawMarker draws a marker on a given position in the image. For the moment several
+marker types are supported (`MARKER_CROSS`, `MARKER_TILTED_CROSS`, `MARKER_STAR`, `MARKER_DIAMOND`, `MARKER_SQUARE`,
+`MARKER_TRIANGLE_UP` and `MARKER_TRIANGLE_DOWN`).
+
+@param img Image.
+@param position The point where the crosshair is positioned.
+@param markerType The specific type of marker you want to use, see cv::MarkerTypes
+@param color Line color.
+@param thickness Line thickness.
+@param line_type Type of the line, see cv::LineTypes
+@param markerSize The length of the marker axis [default = 20 pixels]
+ */
+CV_EXPORTS_W void drawMarker(CV_IN_OUT Mat& img, Point position, const Scalar& color,
+ int markerType = MARKER_CROSS, int markerSize=20, int thickness=1,
+ int line_type=8);
+
+/* ----------------------------------------------------------------------------------------- */
+/* END OF MARKER SECTION */
+/* ----------------------------------------------------------------------------------------- */
+
+//! draws a filled convex polygon in the image
+CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts,
+ const Scalar& color, int lineType=8,
+ int shift=0);
+CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points,
+ const Scalar& color, int lineType=8,
+ int shift=0);
+
+//! fills an area bounded by one or more polygons
+CV_EXPORTS void fillPoly(Mat& img, const Point** pts,
+ const int* npts, int ncontours,
+ const Scalar& color, int lineType=8, int shift=0,
+ Point offset=Point() );
+
+CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts,
+ const Scalar& color, int lineType=8, int shift=0,
+ Point offset=Point() );
+
+//! draws one or more polygonal curves
+CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts,
+ int ncontours, bool isClosed, const Scalar& color,
+ int thickness=1, int lineType=8, int shift=0 );
+
+CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts,
+ bool isClosed, const Scalar& color,
+ int thickness=1, int lineType=8, int shift=0 );
+
+//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height)
+CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2);
+
+//! clips the line segment by the rectangle imgRect
+CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2);
+
+/*!
+ Line iterator class
+
+ The class is used to iterate over all the pixels on the raster line
+ segment connecting two specified points.
+*/
+class CV_EXPORTS LineIterator
+{
+public:
+    //! initializes the iterator
+    LineIterator( const Mat& img, Point pt1, Point pt2,
+                  int connectivity=8, bool leftToRight=false );
+    //! returns pointer to the current pixel
+    uchar* operator *();
+    //! prefix increment operator (++it). shifts iterator to the next pixel
+    LineIterator& operator ++();
+    //! postfix increment operator (it++). shifts iterator to the next pixel
+    LineIterator operator ++(int);
+    //! returns coordinates of the current pixel
+    Point pos() const;
+
+    // internal traversal state; presumably count is the number of pixels on the
+    // segment and err/minusDelta/plusDelta implement the stepping rule —
+    // NOTE(review): confirm member semantics against the .cpp implementation
+    uchar* ptr;
+    const uchar* ptr0;
+    int step, elemSize;
+    int err, count;
+    int minusDelta, plusDelta;
+    int minusStep, plusStep;
+};
+
+//! converts elliptic arc to a polygonal curve
+CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle,
+ int arcStart, int arcEnd, int delta,
+ CV_OUT vector<Point>& pts );
+
+//! font face ids for putText() and getTextSize() (declared below); FONT_ITALIC is a bit flag that may be combined with a face id
+enum
+{
+    FONT_HERSHEY_SIMPLEX = 0,
+    FONT_HERSHEY_PLAIN = 1,
+    FONT_HERSHEY_DUPLEX = 2,
+    FONT_HERSHEY_COMPLEX = 3,
+    FONT_HERSHEY_TRIPLEX = 4,
+    FONT_HERSHEY_COMPLEX_SMALL = 5,
+    FONT_HERSHEY_SCRIPT_SIMPLEX = 6,
+    FONT_HERSHEY_SCRIPT_COMPLEX = 7,
+    FONT_ITALIC = 16
+};
+
+//! renders text string in the image
+CV_EXPORTS_W void putText( Mat& img, const string& text, Point org,
+ int fontFace, double fontScale, Scalar color,
+ int thickness=1, int lineType=8,
+ bool bottomLeftOrigin=false );
+
+//! returns bounding box of the text string
+CV_EXPORTS_W Size getTextSize(const string& text, int fontFace,
+ double fontScale, int thickness,
+ CV_OUT int* baseLine);
+
+///////////////////////////////// Mat_<_Tp> ////////////////////////////////////
+
+/*!
+ Template matrix class derived from Mat
+
+ The class Mat_ is a "thin" template wrapper on top of cv::Mat. It does not have any extra data fields,
+ nor it or cv::Mat have any virtual methods and thus references or pointers to these two classes
+ can be safely converted one to another. But do it with care, for example:
+
+ \code
+ // create 100x100 8-bit matrix
+ Mat M(100,100,CV_8U);
+ // this will compile fine. no any data conversion will be done.
+ Mat_<float>& M1 = (Mat_<float>&)M;
+ // the program will likely crash at the statement below
+ M1(99,99) = 1.f;
+ \endcode
+
+ While cv::Mat is sufficient in most cases, cv::Mat_ can be more convenient if you use a lot of element
+ access operations and if you know matrix type at compile time.
+ Note that cv::Mat::at\<_Tp\>(int y, int x) and cv::Mat_\<_Tp\>::operator ()(int y, int x) do absolutely the
+ same thing and run at the same speed, but the latter is certainly shorter:
+
+ \code
+ Mat_<double> M(20,20);
+ for(int i = 0; i < M.rows; i++)
+ for(int j = 0; j < M.cols; j++)
+ M(i,j) = 1./(i+j+1);
+ Mat E, V;
+ eigen(M,E,V);
+ cout << E.at<double>(0,0)/E.at<double>(M.rows-1,0);
+ \endcode
+
+ It is easy to use Mat_ for multi-channel images/matrices - just pass cv::Vec as cv::Mat_ template parameter:
+
+ \code
+ // allocate 320x240 color image and fill it with green (in RGB space)
+ Mat_<Vec3b> img(240, 320, Vec3b(0,255,0));
+ // now draw a diagonal white line
+ for(int i = 0; i < 100; i++)
+ img(i,i)=Vec3b(255,255,255);
+ // and now modify the 2nd (red) channel of each pixel
+ for(int i = 0; i < img.rows; i++)
+ for(int j = 0; j < img.cols; j++)
+ img(i,j)[2] ^= (uchar)(i ^ j); // img(y,x)[c] accesses c-th channel of the pixel (x,y)
+ \endcode
+*/
+template<typename _Tp> class Mat_ : public Mat
+{
+public:
+    //! STL-style type aliases
+    typedef _Tp value_type;
+    typedef typename DataType<_Tp>::channel_type channel_type;
+    typedef MatIterator_<_Tp> iterator;
+    typedef MatConstIterator_<_Tp> const_iterator;
+
+    //! default constructor
+    Mat_();
+    //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type)
+    Mat_(int _rows, int _cols);
+    //! constructor that sets each matrix element to specified value
+    Mat_(int _rows, int _cols, const _Tp& value);
+    //! equivalent to Mat(_size, DataType<_Tp>::type)
+    explicit Mat_(Size _size);
+    //! constructor that sets each matrix element to specified value
+    Mat_(Size _size, const _Tp& value);
+    //! n-dim array constructor
+    Mat_(int _ndims, const int* _sizes);
+    //! n-dim array constructor that sets each matrix element to specified value
+    Mat_(int _ndims, const int* _sizes, const _Tp& value);
+    //! copy/conversion contructor. If m is of different type, it's converted
+    Mat_(const Mat& m);
+    //! copy constructor
+    Mat_(const Mat_& m);
+    //! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type
+    Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP);
+    //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type
+    Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0);
+    //! selects a submatrix
+    Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all());
+    //! selects a submatrix
+    Mat_(const Mat_& m, const Rect& roi);
+    //! selects a submatrix, n-dim version
+    Mat_(const Mat_& m, const Range* ranges);
+    //! from a matrix expression
+    explicit Mat_(const MatExpr& e);
+    //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column
+    explicit Mat_(const vector<_Tp>& vec, bool copyData=false);
+    //! the following constructors copy the source data by default (copyData=true)
+    template<int n> explicit Mat_(const Vec<typename DataType<_Tp>::channel_type, n>& vec, bool copyData=true);
+    template<int m, int n> explicit Mat_(const Matx<typename DataType<_Tp>::channel_type, m, n>& mtx, bool copyData=true);
+    explicit Mat_(const Point_<typename DataType<_Tp>::channel_type>& pt, bool copyData=true);
+    explicit Mat_(const Point3_<typename DataType<_Tp>::channel_type>& pt, bool copyData=true);
+    explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer);
+
+    Mat_& operator = (const Mat& m);
+    Mat_& operator = (const Mat_& m);
+    //! set all the elements to s.
+    Mat_& operator = (const _Tp& s);
+    //! assign a matrix expression
+    Mat_& operator = (const MatExpr& e);
+
+    //! iterators; they are smart enough to skip gaps in the end of rows
+    iterator begin();
+    iterator end();
+    const_iterator begin() const;
+    const_iterator end() const;
+
+    //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type)
+    void create(int _rows, int _cols);
+    //! equivalent to Mat::create(_size, DataType<_Tp>::type)
+    void create(Size _size);
+    //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type)
+    void create(int _ndims, const int* _sizes);
+    //! cross-product
+    Mat_ cross(const Mat_& m) const;
+    //! data type conversion
+    template<typename T2> operator Mat_<T2>() const;
+    //! overridden forms of Mat::row() etc.
+    Mat_ row(int y) const;
+    Mat_ col(int x) const;
+    Mat_ diag(int d=0) const;
+    Mat_ clone() const;
+
+    //! overridden forms of Mat::elemSize() etc.
+    size_t elemSize() const;
+    size_t elemSize1() const;
+    int type() const;
+    int depth() const;
+    int channels() const;
+    size_t step1(int i=0) const;
+    //! returns step()/sizeof(_Tp)
+    size_t stepT(int i=0) const;
+
+    //! overridden forms of Mat::zeros() etc. Data type is omitted, of course
+    static MatExpr zeros(int rows, int cols);
+    static MatExpr zeros(Size size);
+    static MatExpr zeros(int _ndims, const int* _sizes);
+    static MatExpr ones(int rows, int cols);
+    static MatExpr ones(Size size);
+    static MatExpr ones(int _ndims, const int* _sizes);
+    static MatExpr eye(int rows, int cols);
+    static MatExpr eye(Size size);
+
+    //! some more overriden methods
+    Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright );
+    Mat_ operator()( const Range& rowRange, const Range& colRange ) const;
+    Mat_ operator()( const Rect& roi ) const;
+    Mat_ operator()( const Range* ranges ) const;
+
+    //! more convenient forms of row and element access operators
+    _Tp* operator [](int y);
+    const _Tp* operator [](int y) const;
+
+    //! returns reference to the specified element
+    _Tp& operator ()(const int* idx);
+    //! returns read-only reference to the specified element
+    const _Tp& operator ()(const int* idx) const;
+
+    //! returns reference to the specified element
+    template<int n> _Tp& operator ()(const Vec<int, n>& idx);
+    //! returns read-only reference to the specified element
+    template<int n> const _Tp& operator ()(const Vec<int, n>& idx) const;
+
+    //! returns reference to the specified element (1D case)
+    _Tp& operator ()(int idx0);
+    //! returns read-only reference to the specified element (1D case)
+    const _Tp& operator ()(int idx0) const;
+    //! returns reference to the specified element (2D case)
+    _Tp& operator ()(int idx0, int idx1);
+    //! returns read-only reference to the specified element (2D case)
+    const _Tp& operator ()(int idx0, int idx1) const;
+    //! returns reference to the specified element (3D case)
+    _Tp& operator ()(int idx0, int idx1, int idx2);
+    //! returns read-only reference to the specified element (3D case)
+    const _Tp& operator ()(int idx0, int idx1, int idx2) const;
+
+    //! element access addressed by a Point (2D case)
+    _Tp& operator ()(Point pt);
+    const _Tp& operator ()(Point pt) const;
+
+    //! conversion to vector.
+    operator vector<_Tp>() const;
+    //! conversion to Vec
+    template<int n> operator Vec<typename DataType<_Tp>::channel_type, n>() const;
+    //! conversion to Matx
+    template<int m, int n> operator Matx<typename DataType<_Tp>::channel_type, m, n>() const;
+};
+
+// Shorthand typedefs for common Mat_ specializations: Mat<channels><depth>,
+// where b=uchar, s=short, w=ushort, i=int, f=float, d=double.
+typedef Mat_<uchar> Mat1b;
+typedef Mat_<Vec2b> Mat2b;
+typedef Mat_<Vec3b> Mat3b;
+typedef Mat_<Vec4b> Mat4b;
+
+typedef Mat_<short> Mat1s;
+typedef Mat_<Vec2s> Mat2s;
+typedef Mat_<Vec3s> Mat3s;
+typedef Mat_<Vec4s> Mat4s;
+
+typedef Mat_<ushort> Mat1w;
+typedef Mat_<Vec2w> Mat2w;
+typedef Mat_<Vec3w> Mat3w;
+typedef Mat_<Vec4w> Mat4w;
+
+typedef Mat_<int> Mat1i;
+typedef Mat_<Vec2i> Mat2i;
+typedef Mat_<Vec3i> Mat3i;
+typedef Mat_<Vec4i> Mat4i;
+
+typedef Mat_<float> Mat1f;
+typedef Mat_<Vec2f> Mat2f;
+typedef Mat_<Vec3f> Mat3f;
+typedef Mat_<Vec4f> Mat4f;
+
+typedef Mat_<double> Mat1d;
+typedef Mat_<Vec2d> Mat2d;
+typedef Mat_<Vec3d> Mat3d;
+typedef Mat_<Vec4d> Mat4d;
+
+//////////// Iterators & Comma initializers //////////////////
+
+/*!
+ Untyped read-only matrix iterator.
+
+ Steps through the elements of a cv::Mat, yielding raw uchar* pointers to
+ the elements; the typed variant is MatConstIterator_.
+*/
+class CV_EXPORTS MatConstIterator
+{
+public:
+ typedef uchar* value_type;
+ typedef ptrdiff_t difference_type;
+ typedef const uchar** pointer;
+ typedef uchar* reference;
+ typedef std::random_access_iterator_tag iterator_category;
+
+ //! default constructor
+ MatConstIterator();
+ //! constructor that sets the iterator to the beginning of the matrix
+ MatConstIterator(const Mat* _m);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatConstIterator(const Mat* _m, int _row, int _col=0);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatConstIterator(const Mat* _m, Point _pt);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatConstIterator(const Mat* _m, const int* _idx);
+ //! copy constructor
+ MatConstIterator(const MatConstIterator& it);
+
+ //! copy operator
+ MatConstIterator& operator = (const MatConstIterator& it);
+ //! returns the current matrix element
+ uchar* operator *() const;
+ //! returns the i-th matrix element, relative to the current
+ uchar* operator [](ptrdiff_t i) const;
+
+ //! shifts the iterator forward by the specified number of elements
+ MatConstIterator& operator += (ptrdiff_t ofs);
+ //! shifts the iterator backward by the specified number of elements
+ MatConstIterator& operator -= (ptrdiff_t ofs);
+ //! decrements the iterator
+ MatConstIterator& operator --();
+ //! decrements the iterator
+ MatConstIterator operator --(int);
+ //! increments the iterator
+ MatConstIterator& operator ++();
+ //! increments the iterator
+ MatConstIterator operator ++(int);
+ //! returns the current iterator position
+ Point pos() const;
+ //! returns the current iterator position
+ void pos(int* _idx) const;
+ //! returns the current position as a linear (1D) element offset
+ ptrdiff_t lpos() const;
+ //! moves the iterator by the given linear offset (absolute, or relative when relative=true)
+ void seek(ptrdiff_t ofs, bool relative=false);
+ //! moves the iterator to the given multi-dimensional position (absolute, or relative when relative=true)
+ void seek(const int* _idx, bool relative=false);
+
+ const Mat* m; //!< the iterated matrix
+ size_t elemSize; //!< size of a single matrix element, in bytes
+ uchar* ptr; //!< pointer to the current element
+ uchar* sliceStart; //!< start of the current continuous slice
+ uchar* sliceEnd; //!< end of the current continuous slice
+};
+
+/*!
+ Matrix read-only iterator
+
+ Typed variant of MatConstIterator: dereferencing returns _Tp values
+ instead of raw uchar pointers.
+ */
+template<typename _Tp>
+class MatConstIterator_ : public MatConstIterator
+{
+public:
+ typedef _Tp value_type;
+ typedef ptrdiff_t difference_type;
+ typedef const _Tp* pointer;
+ typedef const _Tp& reference;
+ typedef std::random_access_iterator_tag iterator_category;
+
+ //! default constructor
+ MatConstIterator_();
+ //! constructor that sets the iterator to the beginning of the matrix
+ MatConstIterator_(const Mat_<_Tp>* _m);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatConstIterator_(const Mat_<_Tp>* _m, Point _pt);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx);
+ //! copy constructor
+ MatConstIterator_(const MatConstIterator_& it);
+
+ //! copy operator
+ MatConstIterator_& operator = (const MatConstIterator_& it);
+ //! returns the current matrix element
+ _Tp operator *() const;
+ //! returns the i-th matrix element, relative to the current
+ _Tp operator [](ptrdiff_t i) const;
+
+ //! shifts the iterator forward by the specified number of elements
+ MatConstIterator_& operator += (ptrdiff_t ofs);
+ //! shifts the iterator backward by the specified number of elements
+ MatConstIterator_& operator -= (ptrdiff_t ofs);
+ //! decrements the iterator
+ MatConstIterator_& operator --();
+ //! decrements the iterator
+ MatConstIterator_ operator --(int);
+ //! increments the iterator
+ MatConstIterator_& operator ++();
+ //! increments the iterator
+ MatConstIterator_ operator ++(int);
+ //! returns the current iterator position
+ Point pos() const;
+};
+
+
+/*!
+ Matrix read-write iterator
+
+ Extends MatConstIterator_ so that the dereferenced elements can be
+ modified in place.
+*/
+template<typename _Tp>
+class MatIterator_ : public MatConstIterator_<_Tp>
+{
+public:
+ typedef _Tp* pointer;
+ typedef _Tp& reference;
+ typedef std::random_access_iterator_tag iterator_category;
+
+ //! the default constructor
+ MatIterator_();
+ //! constructor that sets the iterator to the beginning of the matrix
+ MatIterator_(Mat_<_Tp>* _m);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0);
+ //! constructor that sets the iterator to the specified element of the matrix
+ // NOTE(review): the next two constructors take const Mat_* although the iterator
+ // is read-write; kept exactly as declared — confirm against the definitions
+ MatIterator_(const Mat_<_Tp>* _m, Point _pt);
+ //! constructor that sets the iterator to the specified element of the matrix
+ MatIterator_(const Mat_<_Tp>* _m, const int* _idx);
+ //! copy constructor
+ MatIterator_(const MatIterator_& it);
+ //! copy operator
+ MatIterator_& operator = (const MatIterator_<_Tp>& it );
+
+ //! returns the current matrix element
+ _Tp& operator *() const;
+ //! returns the i-th matrix element, relative to the current
+ _Tp& operator [](ptrdiff_t i) const;
+
+ //! shifts the iterator forward by the specified number of elements
+ MatIterator_& operator += (ptrdiff_t ofs);
+ //! shifts the iterator backward by the specified number of elements
+ MatIterator_& operator -= (ptrdiff_t ofs);
+ //! decrements the iterator
+ MatIterator_& operator --();
+ //! decrements the iterator
+ MatIterator_ operator --(int);
+ //! increments the iterator
+ MatIterator_& operator ++();
+ //! increments the iterator
+ MatIterator_ operator ++(int);
+};
+
+//! internal forward declaration used by the comma initializer machinery
+template<typename _Tp> class MatOp_Iter_;
+
+/*!
+ Comma-separated Matrix Initializer
+
+ The class instances are usually not created explicitly.
+ Instead, they are created on "matrix << firstValue" operator.
+
+ The sample below initializes 2x2 rotation matrix:
+
+ \code
+ double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180);
+ Mat R = (Mat_<double>(2,2) << a, -b, b, a);
+ \endcode
+*/
+template<typename _Tp> class MatCommaInitializer_
+{
+public:
+ //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat
+ MatCommaInitializer_(Mat_<_Tp>* _m);
+ //! the operator that takes the next value and put it to the matrix
+ template<typename T2> MatCommaInitializer_<_Tp>& operator , (T2 v);
+ //! another form of conversion operator
+ Mat_<_Tp> operator *() const;
+ operator Mat_<_Tp>() const;
+protected:
+ MatIterator_<_Tp> it; //!< points to the next element to be written
+};
+
+
+//! Comma-separated initializer for the fixed-size Matx class (see MatCommaInitializer_)
+template<typename _Tp, int m, int n> class MatxCommaInitializer
+{
+public:
+ MatxCommaInitializer(Matx<_Tp, m, n>* _mtx);
+ template<typename T2> MatxCommaInitializer<_Tp, m, n>& operator , (T2 val);
+ Matx<_Tp, m, n> operator *() const;
+
+ Matx<_Tp, m, n>* dst; //!< the matrix being initialized
+ int idx; //!< index of the next element to write
+};
+
+//! Comma-separated initializer for the fixed-size Vec class (a single-column Matx)
+template<typename _Tp, int m> class VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1>
+{
+public:
+ VecCommaInitializer(Vec<_Tp, m>* _vec);
+ template<typename T2> VecCommaInitializer<_Tp, m>& operator , (T2 val);
+ Vec<_Tp, m> operator *() const;
+};
+
+/*!
+ Automatically Allocated Buffer Class
+
+ The class is used for temporary buffers in functions and methods.
+ If a temporary buffer is usually small (a few K's of memory),
+ but its size depends on the parameters, it makes sense to create a small
+ fixed-size array on stack and use it if it's large enough. If the required buffer size
+ is larger than the fixed size, another buffer of sufficient size is allocated dynamically
+ and released after the processing. Therefore, in typical cases, when the buffer size is small,
+ there is no overhead associated with malloc()/free().
+ At the same time, there is no limit on the size of processed data.
+
+ This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and
+ the number of stack-allocated elements. Here is how the class is used:
+
+ \code
+ void my_func(const cv::Mat& m)
+ {
+ cv::AutoBuffer<float, 1000> buf; // create automatic buffer containing 1000 floats
+
+ buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used,
+ // otherwise the buffer of "m.rows" floats will be allocated
+ // dynamically and deallocated in cv::AutoBuffer destructor
+ ...
+ }
+ \endcode
+*/
+template<typename _Tp, size_t fixed_size=4096/sizeof(_Tp)+8> class AutoBuffer
+{
+public:
+ typedef _Tp value_type;
+ enum { buffer_padding = (int)((16 + sizeof(_Tp) - 1)/sizeof(_Tp)) };
+
+ //! the default constructor
+ AutoBuffer();
+ //! constructor taking the real buffer size
+ AutoBuffer(size_t _size);
+ //! destructor. calls deallocate()
+ ~AutoBuffer();
+
+ //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used
+ void allocate(size_t _size);
+ //! deallocates the buffer if it was dynamically allocated
+ void deallocate();
+ //! returns pointer to the real buffer, stack-allocated or heap-allocated
+ operator _Tp* ();
+ //! returns read-only pointer to the real buffer, stack-allocated or heap-allocated
+ operator const _Tp* () const;
+
+protected:
+ //! pointer to the real buffer, can point to buf if the buffer is small enough
+ _Tp* ptr;
+ //! size of the real buffer
+ size_t size;
+ //! pre-allocated buffer
+ _Tp buf[fixed_size+buffer_padding];
+};
+
+/////////////////////////// multi-dimensional dense matrix //////////////////////////
+
+/*!
+ n-Dimensional Dense Matrix Iterator Class.
+
+ The class cv::NAryMatIterator is used for iterating over one or more n-dimensional dense arrays (cv::Mat's).
+
+ The iterator is completely different from cv::Mat_ and cv::SparseMat_ iterators.
+ It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays.
+
+ Here is the example on how the iterator can be used to normalize 3D histogram:
+
+ \code
+ void normalizeColorHist(Mat& hist)
+ {
+ #if 1
+ // initialize iterator (the style is different from STL).
+ // after initialization the iterator will contain
+ // the number of slices or planes
+ // the iterator will go through
+ Mat* arrays[] = { &hist, 0 };
+ Mat planes[1];
+ NAryMatIterator it(arrays, planes);
+ double s = 0;
+ // iterate through the matrix. on each iteration
+ // it.planes[i] (of type Mat) will be set to the current plane of
+ // i-th n-dim matrix passed to the iterator constructor.
+ for(int p = 0; p < it.nplanes; p++, ++it)
+ s += sum(it.planes[0])[0];
+ it = NAryMatIterator(arrays, planes);
+ s = 1./s;
+ for(int p = 0; p < it.nplanes; p++, ++it)
+ it.planes[0] *= s;
+ #elif 1
+ // this is a shorter implementation of the above
+ // using built-in operations on Mat
+ double s = sum(hist)[0];
+ hist.convertTo(hist, hist.type(), 1./s, 0);
+ #else
+ // and this is even shorter one
+ // (assuming that the histogram elements are non-negative)
+ normalize(hist, hist, 1, 0, NORM_L1);
+ #endif
+ }
+ \endcode
+
+ You can iterate through several matrices simultaneously as long as they have the same geometry
+ (dimensionality and all the dimension sizes are the same), which is useful for binary
+ and n-ary operations on such matrices. Just pass those matrices to cv::MatNDIterator.
+ Then, during the iteration it.planes[0], it.planes[1], ... will
+ be the slices of the corresponding matrices
+*/
+class CV_EXPORTS NAryMatIterator
+{
+public:
+ //! the default constructor
+ NAryMatIterator();
+ //! the full constructor taking arbitrary number of n-dim matrices
+ NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1);
+ //! the full constructor taking arbitrary number of n-dim matrices
+ NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1);
+ //! the separate iterator initialization method
+ void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1);
+
+ //! proceeds to the next plane of every iterated matrix
+ NAryMatIterator& operator ++();
+ //! proceeds to the next plane of every iterated matrix (postfix increment operator)
+ NAryMatIterator operator ++(int);
+
+ //! the iterated arrays
+ const Mat** arrays;
+ //! the current planes
+ Mat* planes;
+ //! data pointers
+ uchar** ptrs;
+ //! the number of arrays
+ int narrays;
+ //! the number of hyper-planes that the iterator steps through
+ size_t nplanes;
+ //! the size of each segment (in elements)
+ size_t size;
+protected:
+ int iterdepth;
+ size_t idx;
+};
+
+//typedef NAryMatIterator NAryMatNDIterator;
+
+// Function-pointer types used by the per-element type-conversion machinery.
+typedef void (*ConvertData)(const void* from, void* to, int cn);
+typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta);
+
+//! returns the function for converting pixels from one data type to another
+CV_EXPORTS ConvertData getConvertElem(int fromType, int toType);
+//! returns the function for converting pixels from one data type to another with the optional scaling
+CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType);
+
+
+/////////////////////////// multi-dimensional sparse matrix //////////////////////////
+
+class SparseMatIterator;
+class SparseMatConstIterator;
+template<typename _Tp> class SparseMatIterator_;
+template<typename _Tp> class SparseMatConstIterator_;
+
+/*!
+ Sparse matrix class.
+
+ The class represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements
+ of any type that cv::Mat is able to store. "Sparse" means that only non-zero elements
+ are stored (though, as a result of some operations on a sparse matrix, some of its stored elements
+ can actually become 0). It's the user's responsibility to detect such elements and delete them using cv::SparseMat::erase().
+ The non-zero elements are stored in a hash table that grows when it's filled enough,
+ so that the search time remains O(1) in average. Elements can be accessed using the following methods:
+
+ <ol>
+ <li>Query operations: cv::SparseMat::ptr() and the higher-level cv::SparseMat::ref(),
+ cv::SparseMat::value() and cv::SparseMat::find, for example:
+ \code
+ const int dims = 5;
+ int size[] = {10, 10, 10, 10, 10};
+ SparseMat sparse_mat(dims, size, CV_32F);
+ for(int i = 0; i < 1000; i++)
+ {
+ int idx[dims];
+ for(int k = 0; k < dims; k++)
+ idx[k] = rand()%sparse_mat.size(k);
+ sparse_mat.ref<float>(idx) += 1.f;
+ }
+ \endcode
+
+ <li>Sparse matrix iterators. Like cv::Mat iterators and unlike cv::NAryMatIterator, the sparse matrix iterators are STL-style,
+ that is, the iteration is done as following:
+ \code
+ // prints elements of a sparse floating-point matrix and the sum of elements.
+ SparseMatConstIterator_<float>
+ it = sparse_mat.begin<float>(),
+ it_end = sparse_mat.end<float>();
+ double s = 0;
+ int dims = sparse_mat.dims();
+ for(; it != it_end; ++it)
+ {
+ // print element indices and the element value
+ const Node* n = it.node();
+ printf("(");
+ for(int i = 0; i < dims; i++)
+ printf("%3d%c", n->idx[i], i < dims-1 ? ',' : ')');
+ printf(": %f\n", *it);
+ s += *it;
+ }
+ printf("Element sum is %g\n", s);
+ \endcode
+ If you run this loop, you will notice that elements are enumerated
+ in no any logical order (lexicographical etc.),
+ they come in the same order as they stored in the hash table, i.e. semi-randomly.
+
+ You may collect pointers to the nodes and sort them to get the proper ordering.
+ Note, however, that pointers to the nodes may become invalid when you add more
+ elements to the matrix; this is because of possible buffer reallocation.
+
+ <li>A combination of the above 2 methods when you need to process 2 or more sparse
+ matrices simultaneously, e.g. this is how you can compute unnormalized
+ cross-correlation of the 2 floating-point sparse matrices:
+ \code
+ double crossCorr(const SparseMat& a, const SparseMat& b)
+ {
+ const SparseMat *_a = &a, *_b = &b;
+ // if b contains less elements than a,
+ // it's faster to iterate through b
+ if(_a->nzcount() > _b->nzcount())
+ std::swap(_a, _b);
+ SparseMatConstIterator_<float> it = _a->begin<float>(),
+ it_end = _a->end<float>();
+ double ccorr = 0;
+ for(; it != it_end; ++it)
+ {
+ // take the next element from the first matrix
+ float avalue = *it;
+ const Node* anode = it.node();
+ // and try to find element with the same index in the second matrix.
+ // since the hash value depends only on the element index,
+ // we reuse hashvalue stored in the node
+ float bvalue = _b->value<float>(anode->idx,&anode->hashval);
+ ccorr += avalue*bvalue;
+ }
+ return ccorr;
+ }
+ \endcode
+ </ol>
+*/
+class CV_EXPORTS SparseMat
+{
+public:
+ typedef SparseMatIterator iterator;
+ typedef SparseMatConstIterator const_iterator;
+
+ //! the sparse matrix header
+ struct CV_EXPORTS Hdr
+ {
+ Hdr(int _dims, const int* _sizes, int _type);
+ void clear();
+ int refcount; //!< reference counter (see SparseMat::addref()/release())
+ int dims;
+ int valueOffset;
+ size_t nodeSize;
+ size_t nodeCount;
+ size_t freeList;
+ vector<uchar> pool; //!< storage for the hash table nodes
+ vector<size_t> hashtab; //!< the hash table
+ int size[CV_MAX_DIM]; //!< sizes of each dimension
+ };
+
+ //! sparse matrix node - element of a hash table
+ struct CV_EXPORTS Node
+ {
+ //! hash value
+ size_t hashval;
+ //! index of the next node in the same hash table entry
+ size_t next;
+ //! index of the matrix element
+ int idx[CV_MAX_DIM];
+ };
+
+ //! default constructor
+ SparseMat();
+ //! creates matrix of the specified size and type
+ SparseMat(int dims, const int* _sizes, int _type);
+ //! copy constructor
+ SparseMat(const SparseMat& m);
+ //! converts dense 2d matrix to the sparse form
+ /*!
+ \param m the input matrix
+ */
+ explicit SparseMat(const Mat& m);
+ //! converts old-style sparse matrix to the new-style. All the data is copied
+ SparseMat(const CvSparseMat* m);
+ //! the destructor
+ ~SparseMat();
+
+ //! assignment operator. This is O(1) operation, i.e. no data is copied
+ SparseMat& operator = (const SparseMat& m);
+ //! equivalent to the corresponding constructor
+ SparseMat& operator = (const Mat& m);
+
+ //! creates full copy of the matrix
+ SparseMat clone() const;
+
+ //! copies all the data to the destination matrix. All the previous content of m is erased
+ void copyTo( SparseMat& m ) const;
+ //! converts sparse matrix to dense matrix.
+ void copyTo( Mat& m ) const;
+ //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type
+ void convertTo( SparseMat& m, int rtype, double alpha=1 ) const;
+ //! converts sparse matrix to dense n-dim matrix with optional type conversion and scaling.
+ /*!
+ \param m Destination matrix
+ \param rtype The output matrix data type. When it is =-1, the output array will have the same data type as (*this)
+ \param alpha The scale factor
+ \param beta The optional delta added to the scaled values before the conversion
+ */
+ void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const;
+
+ // not used now
+ void assignTo( SparseMat& m, int type=-1 ) const;
+
+ //! reallocates sparse matrix.
+ /*!
+ If the matrix already had the proper size and type,
+ it is simply cleared with clear(), otherwise,
+ the old matrix is released (using release()) and the new one is allocated.
+ */
+ void create(int dims, const int* _sizes, int _type);
+ //! sets all the sparse matrix elements to 0, which means clearing the hash table.
+ void clear();
+ //! manually increments the reference counter to the header.
+ void addref();
+ //! decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated.
+ void release();
+
+ //! converts sparse matrix to the old-style representation; all the elements are copied.
+ operator CvSparseMat*() const;
+ //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements)
+ size_t elemSize() const;
+ //! returns elemSize()/channels()
+ size_t elemSize1() const;
+
+ //! returns type of sparse matrix elements
+ int type() const;
+ //! returns the depth of sparse matrix elements
+ int depth() const;
+ //! returns the number of channels
+ int channels() const;
+
+ //! returns the array of sizes, or NULL if the matrix is not allocated
+ const int* size() const;
+ //! returns the size of i-th matrix dimension (or 0)
+ int size(int i) const;
+ //! returns the matrix dimensionality
+ int dims() const;
+ //! returns the number of non-zero elements (=the number of hash table nodes)
+ size_t nzcount() const;
+
+ //! computes the element hash value (1D case)
+ size_t hash(int i0) const;
+ //! computes the element hash value (2D case)
+ size_t hash(int i0, int i1) const;
+ //! computes the element hash value (3D case)
+ size_t hash(int i0, int i1, int i2) const;
+ //! computes the element hash value (nD case)
+ size_t hash(const int* idx) const;
+
+ //@{
+ /*!
+ specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case.
+
+ return pointer to the matrix element.
+ <ul>
+ <li>if the element is there (it's non-zero), the pointer to it is returned
+ <li>if it's not there and createMissing=false, NULL pointer is returned
+ <li>if it's not there and createMissing=true, then the new element
+ is created and initialized with 0. Pointer to it is returned
+ <li>if the optional hashval pointer is not NULL, the element hash value is
+ not computed, but *hashval is taken instead.
+ </ul>
+ */
+ //! returns pointer to the specified element (1D case)
+ uchar* ptr(int i0, bool createMissing, size_t* hashval=0);
+ //! returns pointer to the specified element (2D case)
+ uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0);
+ //! returns pointer to the specified element (3D case)
+ uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0);
+ //! returns pointer to the specified element (nD case)
+ uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0);
+ //@}
+
+ //@{
+ /*!
+ return read-write reference to the specified sparse matrix element.
+
+ ref<_Tp>(i0,...[,hashval]) is equivalent to *(_Tp*)ptr(i0,...,true[,hashval]).
+ The methods always return a valid reference.
+ If the element did not exist, it is created and initialized with 0.
+ */
+ //! returns reference to the specified element (1D case)
+ template<typename _Tp> _Tp& ref(int i0, size_t* hashval=0);
+ //! returns reference to the specified element (2D case)
+ template<typename _Tp> _Tp& ref(int i0, int i1, size_t* hashval=0);
+ //! returns reference to the specified element (3D case)
+ template<typename _Tp> _Tp& ref(int i0, int i1, int i2, size_t* hashval=0);
+ //! returns reference to the specified element (nD case)
+ template<typename _Tp> _Tp& ref(const int* idx, size_t* hashval=0);
+ //@}
+
+ //@{
+ /*!
+ return value of the specified sparse matrix element.
+
+ value<_Tp>(i0,...[,hashval]) is equivalent to
+
+ \code
+ { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); }
+ \endcode
+
+ That is, if the element did not exist, the methods return 0.
+ */
+ //! returns value of the specified element (1D case)
+ template<typename _Tp> _Tp value(int i0, size_t* hashval=0) const;
+ //! returns value of the specified element (2D case)
+ template<typename _Tp> _Tp value(int i0, int i1, size_t* hashval=0) const;
+ //! returns value of the specified element (3D case)
+ template<typename _Tp> _Tp value(int i0, int i1, int i2, size_t* hashval=0) const;
+ //! returns value of the specified element (nD case)
+ template<typename _Tp> _Tp value(const int* idx, size_t* hashval=0) const;
+ //@}
+
+ //@{
+ /*!
+ Return pointer to the specified sparse matrix element if it exists
+
+ find<_Tp>(i0,...[,hashval]) is equivalent to (const _Tp*)ptr(i0,...,false[,hashval]).
+
+ If the specified element does not exist, the methods return NULL.
+ */
+ //! returns pointer to the specified element (1D case)
+ template<typename _Tp> const _Tp* find(int i0, size_t* hashval=0) const;
+ //! returns pointer to the specified element (2D case)
+ template<typename _Tp> const _Tp* find(int i0, int i1, size_t* hashval=0) const;
+ //! returns pointer to the specified element (3D case)
+ template<typename _Tp> const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const;
+ //! returns pointer to the specified element (nD case)
+ template<typename _Tp> const _Tp* find(const int* idx, size_t* hashval=0) const;
+
+ //! erases the specified element (2D case)
+ void erase(int i0, int i1, size_t* hashval=0);
+ //! erases the specified element (3D case)
+ void erase(int i0, int i1, int i2, size_t* hashval=0);
+ //! erases the specified element (nD case)
+ void erase(const int* idx, size_t* hashval=0);
+
+ //@{
+ /*!
+ return the sparse matrix iterator pointing to the first sparse matrix element
+ */
+ //! returns the sparse matrix iterator at the matrix beginning
+ SparseMatIterator begin();
+ //! returns the sparse matrix iterator at the matrix beginning
+ template<typename _Tp> SparseMatIterator_<_Tp> begin();
+ //! returns the read-only sparse matrix iterator at the matrix beginning
+ SparseMatConstIterator begin() const;
+ //! returns the read-only sparse matrix iterator at the matrix beginning
+ template<typename _Tp> SparseMatConstIterator_<_Tp> begin() const;
+ //@}
+ /*!
+ return the sparse matrix iterator pointing to the element following the last sparse matrix element
+ */
+ //! returns the sparse matrix iterator at the matrix end
+ SparseMatIterator end();
+ //! returns the read-only sparse matrix iterator at the matrix end
+ SparseMatConstIterator end() const;
+ //! returns the typed sparse matrix iterator at the matrix end
+ template<typename _Tp> SparseMatIterator_<_Tp> end();
+ //! returns the typed read-only sparse matrix iterator at the matrix end
+ template<typename _Tp> SparseMatConstIterator_<_Tp> end() const;
+
+ //! returns the value stored in the sparse matrix node
+ template<typename _Tp> _Tp& value(Node* n);
+ //! returns the value stored in the sparse matrix node
+ template<typename _Tp> const _Tp& value(const Node* n) const;
+
+ ////////////// some internal-use methods ///////////////
+ Node* node(size_t nidx);
+ const Node* node(size_t nidx) const;
+
+ uchar* newNode(const int* idx, size_t hashval);
+ void removeNode(size_t hidx, size_t nidx, size_t previdx);
+ void resizeHashTab(size_t newsize);
+
+ enum { MAGIC_VAL=0x42FD0000, MAX_DIM=CV_MAX_DIM, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 };
+
+ int flags;
+ Hdr* hdr; //!< pointer to the shared header (reference-counted; see addref()/release())
+};
+
+//! finds global minimum and maximum sparse array elements and returns their values and their locations
+// (any of the output pointers may be 0 if the corresponding value is not needed)
+CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal,
+ double* maxVal, int* minIdx=0, int* maxIdx=0);
+//! computes norm of a sparse matrix
+CV_EXPORTS double norm( const SparseMat& src, int normType );
+//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
+CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType );
+
+/*!
+ Read-Only Sparse Matrix Iterator.
+ Here is how to use the iterator to compute the sum of floating-point sparse matrix elements:
+
+ \code
+ SparseMatConstIterator it = m.begin(), it_end = m.end();
+ double s = 0;
+ CV_Assert( m.type() == CV_32F );
+ for( ; it != it_end; ++it )
+ s += it.value<float>();
+ \endcode
+*/
+class CV_EXPORTS SparseMatConstIterator
+{
+public:
+ //! the default constructor
+ SparseMatConstIterator();
+ //! the full constructor setting the iterator to the first sparse matrix element
+ SparseMatConstIterator(const SparseMat* _m);
+ //! the copy constructor
+ SparseMatConstIterator(const SparseMatConstIterator& it);
+
+ //! the assignment operator
+ SparseMatConstIterator& operator = (const SparseMatConstIterator& it);
+
+ //! template method returning the current matrix element
+ template<typename _Tp> const _Tp& value() const;
+ //! returns the current node of the sparse matrix. it.node->idx is the current element index
+ const SparseMat::Node* node() const;
+
+ //! moves iterator to the previous element
+ SparseMatConstIterator& operator --();
+ //! moves iterator to the previous element
+ SparseMatConstIterator operator --(int);
+ //! moves iterator to the next element
+ SparseMatConstIterator& operator ++();
+ //! moves iterator to the next element
+ SparseMatConstIterator operator ++(int);
+
+ //! moves iterator to the element after the last element
+ void seekEnd();
+
+ const SparseMat* m; //!< the iterated matrix
+ size_t hashidx; //!< current index into the matrix hash table (see SparseMat::Hdr::hashtab)
+ uchar* ptr; //!< pointer to the current node
+};
+
+/*!
+ Read-write Sparse Matrix Iterator
+
+ The class is similar to cv::SparseMatConstIterator,
+ but can be used for in-place modification of the matrix elements.
+ Instances are normally obtained via cv::SparseMat::begin()/end().
+*/
+class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator
+{
+public:
+ //! the default constructor
+ SparseMatIterator();
+ //! the full constructor setting the iterator to the first sparse matrix element
+ SparseMatIterator(SparseMat* _m);
+ //! the full constructor setting the iterator to the specified sparse matrix element
+ SparseMatIterator(SparseMat* _m, const int* idx);
+ //! the copy constructor
+ SparseMatIterator(const SparseMatIterator& it);
+
+ //! the assignment operator
+ SparseMatIterator& operator = (const SparseMatIterator& it);
+ //! returns read-write reference to the current sparse matrix element
+ template<typename _Tp> _Tp& value() const;
+ //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!)
+ SparseMat::Node* node() const;
+
+ //! moves iterator to the next element
+ SparseMatIterator& operator ++();
+ //! moves iterator to the next element
+ SparseMatIterator operator ++(int);
+};
+
+/*!
+ The Template Sparse Matrix class derived from cv::SparseMat
+
+ The class provides slightly more convenient operations for accessing elements.
+
+ \code
+ SparseMat m;
+ ...
+ SparseMat_<int> m_ = (SparseMat_<int>&)m;
+ m_.ref(1)++; // equivalent to m.ref<int>(1)++;
+ m_.ref(2) += m_(3); // equivalent to m.ref<int>(2) += m.value<int>(3);
+ \endcode
+*/
+template<typename _Tp> class SparseMat_ : public SparseMat
+{
+public:
+ typedef SparseMatIterator_<_Tp> iterator;
+ typedef SparseMatConstIterator_<_Tp> const_iterator;
+
+ //! the default constructor
+ SparseMat_();
+ //! the full constructor equivalent to SparseMat(dims, _sizes, DataType<_Tp>::type)
+ SparseMat_(int dims, const int* _sizes);
+ //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted
+ SparseMat_(const SparseMat& m);
+ //! the copy constructor. This is O(1) operation - no data is copied
+ SparseMat_(const SparseMat_& m);
+ //! converts dense matrix to the sparse form
+ SparseMat_(const Mat& m);
+ //! converts the old-style sparse matrix to the C++ class. All the elements are copied
+ SparseMat_(const CvSparseMat* m);
+ //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted
+ SparseMat_& operator = (const SparseMat& m);
+ //! the assignment operator. This is O(1) operation - no data is copied
+ SparseMat_& operator = (const SparseMat_& m);
+ //! converts dense matrix to the sparse form
+ SparseMat_& operator = (const Mat& m);
+
+ //! makes full copy of the matrix. All the elements are duplicated
+ SparseMat_ clone() const;
+ //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type)
+ void create(int dims, const int* _sizes);
+ //! converts sparse matrix to the old-style CvSparseMat. All the elements are copied
+ operator CvSparseMat*() const;
+
+ //! returns type of the matrix elements
+ int type() const;
+ //! returns depth of the matrix elements
+ int depth() const;
+ //! returns the number of channels in each matrix element
+ int channels() const;
+
+ //! equivalent to SparseMat::ref<_Tp>(i0, hashval)
+ _Tp& ref(int i0, size_t* hashval=0);
+ //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval)
+ _Tp& ref(int i0, int i1, size_t* hashval=0);
+ //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval)
+ _Tp& ref(int i0, int i1, int i2, size_t* hashval=0);
+ //! equivalent to SparseMat::ref<_Tp>(idx, hashval)
+ _Tp& ref(const int* idx, size_t* hashval=0);
+
+ //! equivalent to SparseMat::value<_Tp>(i0, hashval)
+ _Tp operator()(int i0, size_t* hashval=0) const;
+ //! equivalent to SparseMat::value<_Tp>(i0, i1, hashval)
+ _Tp operator()(int i0, int i1, size_t* hashval=0) const;
+ //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval)
+ _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const;
+ //! equivalent to SparseMat::value<_Tp>(idx, hashval)
+ _Tp operator()(const int* idx, size_t* hashval=0) const;
+
+ //! returns sparse matrix iterator pointing to the first sparse matrix element
+ SparseMatIterator_<_Tp> begin();
+ //! returns read-only sparse matrix iterator pointing to the first sparse matrix element
+ SparseMatConstIterator_<_Tp> begin() const;
+ //! returns sparse matrix iterator pointing to the element following the last sparse matrix element
+ SparseMatIterator_<_Tp> end();
+ //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element
+ SparseMatConstIterator_<_Tp> end() const;
+};
+
+
+/*!
+ Template Read-Only Sparse Matrix Iterator Class.
+
+ This class is derived from SparseMatConstIterator and
+ introduces a more convenient operator *() for accessing the current element.
+*/
+template<typename _Tp> class SparseMatConstIterator_ : public SparseMatConstIterator
+{
+public:
+ typedef std::forward_iterator_tag iterator_category;
+
+ //! the default constructor
+ SparseMatConstIterator_();
+ //! the full constructor setting the iterator to the first sparse matrix element
+ SparseMatConstIterator_(const SparseMat_<_Tp>* _m);
+ SparseMatConstIterator_(const SparseMat* _m); //!< overload taking an untyped sparse matrix
+ //! the copy constructor
+ SparseMatConstIterator_(const SparseMatConstIterator_& it);
+
+ //! the assignment operator
+ SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it);
+ //! the element access operator
+ const _Tp& operator *() const;
+
+ //! moves iterator to the next element
+ SparseMatConstIterator_& operator ++();
+ //! moves iterator to the next element
+ SparseMatConstIterator_ operator ++(int);
+};
+
+/*!
+ Template Read-Write Sparse Matrix Iterator Class.
+
+ This class is derived from cv::SparseMatConstIterator_ and
+ introduces a more convenient operator *() for accessing the current element.
+*/
+template<typename _Tp> class SparseMatIterator_ : public SparseMatConstIterator_<_Tp>
+{
+public:
+ typedef std::forward_iterator_tag iterator_category;
+
+ //! the default constructor
+ SparseMatIterator_();
+ //! the full constructor setting the iterator to the first sparse matrix element
+ SparseMatIterator_(SparseMat_<_Tp>* _m);
+ SparseMatIterator_(SparseMat* _m); //!< overload taking an untyped sparse matrix
+ //! the copy constructor
+ SparseMatIterator_(const SparseMatIterator_& it);
+
+ //! the assignment operator
+ SparseMatIterator_& operator = (const SparseMatIterator_& it);
+ //! returns the reference to the current element
+ _Tp& operator *() const;
+
+ //! moves the iterator to the next element
+ SparseMatIterator_& operator ++();
+ //! moves the iterator to the next element
+ SparseMatIterator_ operator ++(int);
+};
+
+//////////////////// Fast Nearest-Neighbor Search Structure ////////////////////
+
+/*!
+ Fast Nearest Neighbor Search Class.
+
+ The class implements D. Lowe BBF (Best-Bin-First) algorithm for the fast
+ approximate (or accurate) nearest neighbor search in multi-dimensional spaces.
+
+ First, a set of vectors is passed to KDTree::KDTree() constructor
+ or KDTree::build() method, where it is reordered.
+
+ Then arbitrary vectors can be passed to KDTree::findNearest() methods, which
+ find the K nearest neighbors among the vectors from the initial set.
+ The user can balance between the speed and accuracy of the search by varying Emax
+ parameter, which is the number of leaves that the algorithm checks.
+ The larger parameter values yield more accurate results at the expense of lower processing speed.
+
+ \code
+ KDTree T(points, false);
+ const int K = 3, Emax = INT_MAX;
+ int idx[K];
+ float dist[K];
+ T.findNearest(query_vec, K, Emax, idx, 0, dist);
+ CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]);
+ \endcode
+*/
+class CV_EXPORTS_W KDTree
+{
+public:
+ /*!
+ The node of the search tree.
+ */
+ struct Node
+ {
+ Node() : idx(-1), left(-1), right(-1), boundary(0.f) {}
+ Node(int _idx, int _left, int _right, float _boundary)
+ : idx(_idx), left(_left), right(_right), boundary(_boundary) {}
+ //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point)
+ int idx;
+ //! node indices of the left and the right branches
+ int left, right;
+ //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right
+ float boundary;
+ };
+
+ //! the default constructor
+ CV_WRAP KDTree();
+ //! the full constructor that builds the search tree
+ CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints=false);
+ //! the full constructor that builds the search tree
+ CV_WRAP KDTree(InputArray points, InputArray _labels,
+ bool copyAndReorderPoints=false);
+ //! builds the search tree
+ CV_WRAP void build(InputArray points, bool copyAndReorderPoints=false);
+ //! builds the search tree
+ CV_WRAP void build(InputArray points, InputArray labels,
+ bool copyAndReorderPoints=false);
+ //! finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves
+ CV_WRAP int findNearest(InputArray vec, int K, int Emax,
+ OutputArray neighborsIdx,
+ OutputArray neighbors=noArray(),
+ OutputArray dist=noArray(),
+ OutputArray labels=noArray()) const;
+ //! finds all the points from the initial set that belong to the specified box
+ CV_WRAP void findOrthoRange(InputArray minBounds,
+ InputArray maxBounds,
+ OutputArray neighborsIdx,
+ OutputArray neighbors=noArray(),
+ OutputArray labels=noArray()) const;
+ //! returns vectors with the specified indices
+ CV_WRAP void getPoints(InputArray idx, OutputArray pts,
+ OutputArray labels=noArray()) const;
+ //! return a vector with the specified index
+ const float* getPoint(int ptidx, int* label=0) const;
+ //! returns the search space dimensionality
+ CV_WRAP int dims() const;
+
+ vector<Node> nodes; //!< all the tree nodes
+ CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set.
+ CV_PROP vector<int> labels; //!< the parallel array of labels.
+ CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it
+ CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it
+};
+
+//////////////////////////////////////// XML & YAML I/O ////////////////////////////////////
+
+class CV_EXPORTS FileNode;
+
+/*!
+ XML/YAML File Storage Class.
+
+ The class describes an object associated with XML or YAML file.
+ It can be used to store data to such a file or read and decode the data.
+
+ The storage is organized as a tree of nested sequences (or lists) and mappings.
+ Sequence is a heterogeneous array whose elements are accessed by indices or sequentially using an iterator.
+ Mapping is analogue of std::map or C structure, whose elements are accessed by names.
+ The most top level structure is a mapping.
+ Leaves of the file storage tree are integers, floating-point numbers and text strings.
+
+ For example, the following code:
+
+ \code
+ // open file storage for writing. Type of the file is determined from the extension
+ FileStorage fs("test.yml", FileStorage::WRITE);
+ fs << "test_int" << 5 << "test_real" << 3.1 << "test_string" << "ABCDEFGH";
+ fs << "test_mat" << Mat::eye(3,3,CV_32F);
+
+ fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" <<
+ "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]";
+ fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:";
+
+ const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1};
+ fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0])));
+
+ fs << "]" << "}";
+ \endcode
+
+ will produce the following file:
+
+ \verbatim
+ %YAML:1.0
+ test_int: 5
+ test_real: 3.1000000000000001e+00
+ test_string: ABCDEFGH
+ test_mat: !!opencv-matrix
+ rows: 3
+ cols: 3
+ dt: f
+ data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1. ]
+ test_list:
+ - 1.0000000000000000e-13
+ - 2
+ - 3.1415926535897931e+00
+ - -3435345
+ - "2-502 2-029 3egegeg"
+ - { month:12, day:31, year:1969 }
+ test_map:
+ x: 1
+ y: 2
+ width: 100
+ height: 200
+ lbp: [ 0, 1, 1, 0, 1, 1, 0, 1 ]
+ \endverbatim
+
+ and to read the file above, the following code can be used:
+
+ \code
+ // open file storage for reading.
+ // Type of the file is determined from the content, not the extension
+ FileStorage fs("test.yml", FileStorage::READ);
+ int test_int = (int)fs["test_int"];
+ double test_real = (double)fs["test_real"];
+ string test_string = (string)fs["test_string"];
+
+ Mat M;
+ fs["test_mat"] >> M;
+
+ FileNode tl = fs["test_list"];
+ CV_Assert(tl.type() == FileNode::SEQ && tl.size() == 6);
+ double tl0 = (double)tl[0];
+ int tl1 = (int)tl[1];
+ double tl2 = (double)tl[2];
+ int tl3 = (int)tl[3];
+ string tl4 = (string)tl[4];
+ CV_Assert(tl[5].type() == FileNode::MAP && tl[5].size() == 3);
+
+ int month = (int)tl[5]["month"];
+ int day = (int)tl[5]["day"];
+ int year = (int)tl[5]["year"];
+
+ FileNode tm = fs["test_map"];
+
+ int x = (int)tm["x"];
+ int y = (int)tm["y"];
+ int width = (int)tm["width"];
+ int height = (int)tm["height"];
+
+ int lbp_val = 0;
+ FileNodeIterator it = tm["lbp"].begin();
+
+ for(int k = 0; k < 8; k++, ++it)
+ lbp_val |= ((int)*it) << k;
+ \endcode
+*/
+class CV_EXPORTS_W FileStorage
+{
+public:
+ //! file storage mode
+ enum
+ {
+ READ=0, //!< read mode
+ WRITE=1, //!< write mode
+ APPEND=2, //!< append mode
+ MEMORY=4,
+ FORMAT_MASK=(7<<3),
+ FORMAT_AUTO=0,
+ FORMAT_XML=(1<<3),
+ FORMAT_YAML=(2<<3)
+ };
+ enum
+ {
+ UNDEFINED=0,
+ VALUE_EXPECTED=1,
+ NAME_EXPECTED=2,
+ INSIDE_MAP=4
+ };
+ //! the default constructor
+ CV_WRAP FileStorage();
+ //! the full constructor that opens file storage for reading or writing
+ CV_WRAP FileStorage(const string& source, int flags, const string& encoding=string());
+ //! the constructor that takes pointer to the C FileStorage structure
+ FileStorage(CvFileStorage* fs);
+ //! the destructor. calls release()
+ virtual ~FileStorage();
+
+ //! opens file storage for reading or writing. The previous storage is closed with release()
+ CV_WRAP virtual bool open(const string& filename, int flags, const string& encoding=string());
+ //! returns true if the object is associated with currently opened file.
+ CV_WRAP virtual bool isOpened() const;
+ //! closes the file and releases all the memory buffers
+ CV_WRAP virtual void release();
+ //! closes the file, releases all the memory buffers and returns the text string
+ CV_WRAP string releaseAndGetString();
+
+ //! returns the first element of the top-level mapping
+ CV_WRAP FileNode getFirstTopLevelNode() const;
+ //! returns the top-level mapping. YAML supports multiple streams
+ CV_WRAP FileNode root(int streamidx=0) const;
+ //! returns the specified element of the top-level mapping
+ FileNode operator[](const string& nodename) const;
+ //! returns the specified element of the top-level mapping
+ CV_WRAP FileNode operator[](const char* nodename) const;
+
+ //! returns pointer to the underlying C FileStorage structure
+ CvFileStorage* operator *() { return fs; }
+ //! returns pointer to the underlying C FileStorage structure
+ const CvFileStorage* operator *() const { return fs; }
+ //! writes one or more numbers of the specified format to the currently written structure
+ void writeRaw( const string& fmt, const uchar* vec, size_t len );
+ //! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite()
+ void writeObj( const string& name, const void* obj );
+
+ //! returns the normalized object name for the specified file name
+ static string getDefaultObjectName(const string& filename);
+
+ Ptr<CvFileStorage> fs; //!< the underlying C FileStorage structure
+ string elname; //!< the currently written element
+ vector<char> structs; //!< the stack of written structures
+ int state; //!< the writer state
+};
+
+class CV_EXPORTS FileNodeIterator;
+
+/*!
+ File Storage Node class
+
+ The node is used to store each and every element of the file storage opened for reading -
+ from the primitive objects, such as numbers and text strings, to the complex nodes:
+ sequences, mappings and the registered objects.
+
+ Note that file nodes are only used for navigating file storages opened for reading.
+ When a file storage is opened for writing, no data is stored in memory after it is written.
+*/
+class CV_EXPORTS_W_SIMPLE FileNode
+{
+public:
+ //! type of the file storage node
+ enum
+ {
+ NONE=0, //!< empty node
+ INT=1, //!< an integer
+ REAL=2, //!< floating-point number
+ FLOAT=REAL, //!< synonym for REAL
+ STR=3, //!< text string in UTF-8 encoding
+ STRING=STR, //!< synonym for STR
+ REF=4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others
+ SEQ=5, //!< sequence
+ MAP=6, //!< mapping
+ TYPE_MASK=7,
+ FLOW=8, //!< compact representation of a sequence or mapping. Used only by YAML writer
+ USER=16, //!< a registered object (e.g. a matrix)
+ EMPTY=32, //!< empty structure (sequence or mapping)
+ NAMED=64 //!< the node has a name (i.e. it is element of a mapping)
+ };
+ //! the default constructor
+ CV_WRAP FileNode();
+ //! the full constructor wrapping CvFileNode structure.
+ FileNode(const CvFileStorage* fs, const CvFileNode* node);
+ //! the copy constructor
+ FileNode(const FileNode& node);
+ //! returns element of a mapping node
+ FileNode operator[](const string& nodename) const;
+ //! returns element of a mapping node
+ CV_WRAP FileNode operator[](const char* nodename) const;
+ //! returns element of a sequence node
+ CV_WRAP FileNode operator[](int i) const;
+ //! returns type of the node
+ CV_WRAP int type() const;
+
+ //! returns true if the node is empty
+ CV_WRAP bool empty() const;
+ //! returns true if the node is a "none" object
+ CV_WRAP bool isNone() const;
+ //! returns true if the node is a sequence
+ CV_WRAP bool isSeq() const;
+ //! returns true if the node is a mapping
+ CV_WRAP bool isMap() const;
+ //! returns true if the node is an integer
+ CV_WRAP bool isInt() const;
+ //! returns true if the node is a floating-point number
+ CV_WRAP bool isReal() const;
+ //! returns true if the node is a text string
+ CV_WRAP bool isString() const;
+ //! returns true if the node has a name
+ CV_WRAP bool isNamed() const;
+ //! returns the node name or an empty string if the node is nameless
+ CV_WRAP string name() const;
+ //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise.
+ CV_WRAP size_t size() const;
+ //! returns the node content as an integer. If the node stores floating-point number, it is rounded.
+ operator int() const;
+ //! returns the node content as float
+ operator float() const;
+ //! returns the node content as double
+ operator double() const;
+ //! returns the node content as text string
+ operator string() const;
+
+ //! returns pointer to the underlying file node
+ CvFileNode* operator *();
+ //! returns pointer to the underlying file node
+ const CvFileNode* operator* () const;
+
+ //! returns iterator pointing to the first node element
+ FileNodeIterator begin() const;
+ //! returns iterator pointing to the element following the last node element
+ FileNodeIterator end() const;
+
+ //! reads node elements to the buffer with the specified format
+ void readRaw( const string& fmt, uchar* vec, size_t len ) const;
+ //! reads the registered object and returns pointer to it
+ void* readObj() const;
+
+ // do not use wrapper pointer classes for better efficiency
+ const CvFileStorage* fs;
+ const CvFileNode* node;
+};
+
+
+/*!
+ File Node Iterator
+
+ The class is used for iterating sequences (usually) and mappings.
+ */
+class CV_EXPORTS FileNodeIterator
+{
+public:
+ //! the default constructor
+ FileNodeIterator();
+ //! the full constructor set to the ofs-th element of the node
+ FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0);
+ //! the copy constructor
+ FileNodeIterator(const FileNodeIterator& it);
+ //! returns the currently observed element
+ FileNode operator *() const;
+ //! accesses the currently observed element methods
+ FileNode operator ->() const;
+
+ //! moves iterator to the next node
+ FileNodeIterator& operator ++ ();
+ //! moves iterator to the next node
+ FileNodeIterator operator ++ (int);
+ //! moves iterator to the previous node
+ FileNodeIterator& operator -- ();
+ //! moves iterator to the previous node
+ FileNodeIterator operator -- (int);
+ //! moves iterator forward by the specified offset (possibly negative)
+ FileNodeIterator& operator += (int ofs);
+ //! moves iterator backward by the specified offset (possibly negative)
+ FileNodeIterator& operator -= (int ofs);
+
+ //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format
+ FileNodeIterator& readRaw( const string& fmt, uchar* vec,
+ size_t maxCount=(size_t)INT_MAX );
+
+ const CvFileStorage* fs; //!< the underlying C file storage
+ const CvFileNode* container; //!< the node whose elements are iterated
+ CvSeqReader reader; //!< low-level sequence reader state
+ size_t remaining; //!< remaining element count (presumably; verify against the implementation)
+};
+
+////////////// convenient wrappers for operating old-style dynamic structures //////////////
+
+template<typename _Tp> class SeqIterator;
+
+typedef Ptr<CvMemStorage> MemStorage;
+
+/*!
+ Template Sequence Class derived from CvSeq
+
+ The class provides more convenient access to sequence elements,
+ STL-style operations and iterators.
+
+ \note The class is targeted for simple data types,
+ i.e. no constructors or destructors
+ are called for the sequence elements.
+*/
+template<typename _Tp> class Seq
+{
+public:
+ typedef SeqIterator<_Tp> iterator;
+ typedef SeqIterator<_Tp> const_iterator;
+
+ //! the default constructor
+ Seq();
+ //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp.
+ Seq(const CvSeq* seq);
+ //! creates the empty sequence that resides in the specified storage
+ Seq(MemStorage& storage, int headerSize = sizeof(CvSeq));
+ //! returns read-write reference to the specified element
+ _Tp& operator [](int idx);
+ //! returns read-only reference to the specified element
+ const _Tp& operator[](int idx) const;
+ //! returns iterator pointing to the beginning of the sequence
+ SeqIterator<_Tp> begin() const;
+ //! returns iterator pointing to the element following the last sequence element
+ SeqIterator<_Tp> end() const;
+ //! returns the number of elements in the sequence
+ size_t size() const;
+ //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...)
+ int type() const;
+ //! returns the depth of sequence elements (CV_8U ... CV_64F)
+ int depth() const;
+ //! returns the number of channels in each sequence element
+ int channels() const;
+ //! returns the size of each sequence element
+ size_t elemSize() const;
+ //! returns index of the specified sequence element
+ size_t index(const _Tp& elem) const;
+ //! appends the specified element to the end of the sequence
+ void push_back(const _Tp& elem);
+ //! appends the specified element to the front of the sequence
+ void push_front(const _Tp& elem);
+ //! appends zero or more elements to the end of the sequence
+ void push_back(const _Tp* elems, size_t count);
+ //! appends zero or more elements to the front of the sequence
+ void push_front(const _Tp* elems, size_t count);
+ //! inserts the specified element to the specified position
+ void insert(int idx, const _Tp& elem);
+ //! inserts zero or more elements to the specified position
+ void insert(int idx, const _Tp* elems, size_t count);
+ //! removes element at the specified position
+ void remove(int idx);
+ //! removes the specified subsequence
+ void remove(const Range& r);
+
+ //! returns reference to the first sequence element
+ _Tp& front();
+ //! returns read-only reference to the first sequence element
+ const _Tp& front() const;
+ //! returns reference to the last sequence element
+ _Tp& back();
+ //! returns read-only reference to the last sequence element
+ const _Tp& back() const;
+ //! returns true iff the sequence contains no elements
+ bool empty() const;
+
+ //! removes all the elements from the sequence
+ void clear();
+ //! removes the first element from the sequence
+ void pop_front();
+ //! removes the last element from the sequence
+ void pop_back();
+ //! removes zero or more elements from the beginning of the sequence
+ void pop_front(_Tp* elems, size_t count);
+ //! removes zero or more elements from the end of the sequence
+ void pop_back(_Tp* elems, size_t count);
+
+ //! copies the whole sequence or the sequence slice to the specified vector
+ void copyTo(vector<_Tp>& vec, const Range& range=Range::all()) const;
+ //! returns the vector containing all the sequence elements
+ operator vector<_Tp>() const;
+
+ CvSeq* seq; //!< the wrapped C sequence
+};
+
+
+/*!
+ STL-style Sequence Iterator inherited from the CvSeqReader structure
+*/
+template<typename _Tp> class SeqIterator : public CvSeqReader
+{
+public:
+ //! the default constructor
+ SeqIterator();
+ //! the constructor setting the iterator to the beginning or to the end of the sequence
+ SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false);
+ //! positions the iterator within the sequence
+ void seek(size_t pos);
+ //! reports the current iterator position
+ size_t tell() const;
+ //! returns reference to the current sequence element
+ _Tp& operator *();
+ //! returns read-only reference to the current sequence element
+ const _Tp& operator *() const;
+ //! moves iterator to the next sequence element
+ SeqIterator& operator ++();
+ //! moves iterator to the next sequence element
+ SeqIterator operator ++(int) const;
+ //! moves iterator to the previous sequence element
+ SeqIterator& operator --();
+ //! moves iterator to the previous sequence element
+ SeqIterator operator --(int) const;
+
+ //! moves iterator forward by the specified offset (possibly negative)
+ SeqIterator& operator +=(int);
+ //! moves iterator backward by the specified offset (possibly negative)
+ SeqIterator& operator -=(int);
+
+ // this is index of the current element modulo seq->total*2
+ // (to distinguish between 0 and seq->total)
+ int index;
+};
+
+
+class CV_EXPORTS Algorithm;
+class CV_EXPORTS AlgorithmInfo;
+struct CV_EXPORTS AlgorithmInfoData;
+
+template<typename _Tp> struct ParamType {}; //!< primary template; specialized below for each supported parameter type
+
+/*!
+ Base class for high-level OpenCV algorithms
+*/
+class CV_EXPORTS_W Algorithm
+{
+public:
+ Algorithm(); //!< the default constructor
+ virtual ~Algorithm(); //!< the destructor
+ string name() const; //!< returns the algorithm name
+
+ template<typename _Tp> typename ParamType<_Tp>::member_type get(const string& name) const;
+ template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
+
+ CV_WRAP int getInt(const string& name) const;
+ CV_WRAP double getDouble(const string& name) const;
+ CV_WRAP bool getBool(const string& name) const;
+ CV_WRAP string getString(const string& name) const;
+ CV_WRAP Mat getMat(const string& name) const;
+ CV_WRAP vector<Mat> getMatVector(const string& name) const;
+ CV_WRAP Ptr<Algorithm> getAlgorithm(const string& name) const;
+
+ void set(const string& name, int value);
+ void set(const string& name, double value);
+ void set(const string& name, bool value);
+ void set(const string& name, const string& value);
+ void set(const string& name, const Mat& value);
+ void set(const string& name, const vector<Mat>& value);
+ void set(const string& name, const Ptr<Algorithm>& value);
+ template<typename _Tp> void set(const string& name, const Ptr<_Tp>& value);
+
+ CV_WRAP void setInt(const string& name, int value);
+ CV_WRAP void setDouble(const string& name, double value);
+ CV_WRAP void setBool(const string& name, bool value);
+ CV_WRAP void setString(const string& name, const string& value);
+ CV_WRAP void setMat(const string& name, const Mat& value);
+ CV_WRAP void setMatVector(const string& name, const vector<Mat>& value);
+ CV_WRAP void setAlgorithm(const string& name, const Ptr<Algorithm>& value);
+ template<typename _Tp> void setAlgorithm(const string& name, const Ptr<_Tp>& value);
+
+ void set(const char* name, int value);
+ void set(const char* name, double value);
+ void set(const char* name, bool value);
+ void set(const char* name, const string& value);
+ void set(const char* name, const Mat& value);
+ void set(const char* name, const vector<Mat>& value);
+ void set(const char* name, const Ptr<Algorithm>& value);
+ template<typename _Tp> void set(const char* name, const Ptr<_Tp>& value);
+
+ void setInt(const char* name, int value);
+ void setDouble(const char* name, double value);
+ void setBool(const char* name, bool value);
+ void setString(const char* name, const string& value);
+ void setMat(const char* name, const Mat& value);
+ void setMatVector(const char* name, const vector<Mat>& value);
+ void setAlgorithm(const char* name, const Ptr<Algorithm>& value);
+ template<typename _Tp> void setAlgorithm(const char* name, const Ptr<_Tp>& value);
+
+ CV_WRAP string paramHelp(const string& name) const;
+ int paramType(const char* name) const;
+ CV_WRAP int paramType(const string& name) const;
+ CV_WRAP void getParams(CV_OUT vector<string>& names) const;
+
+
+ virtual void write(FileStorage& fs) const;
+ virtual void read(const FileNode& fn);
+
+ typedef Algorithm* (*Constructor)(void);
+ typedef int (Algorithm::*Getter)() const;
+ typedef void (Algorithm::*Setter)(int);
+
+ CV_WRAP static void getList(CV_OUT vector<string>& algorithms);
+ CV_WRAP static Ptr<Algorithm> _create(const string& name);
+ template<typename _Tp> static Ptr<_Tp> create(const string& name);
+
+ virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; }
+};
+
+
+class CV_EXPORTS AlgorithmInfo
+{
+public:
+ friend class Algorithm;
+ AlgorithmInfo(const string& name, Algorithm::Constructor create);
+ ~AlgorithmInfo();
+ void get(const Algorithm* algo, const char* name, int argType, void* value) const;
+ void addParam_(Algorithm& algo, const char* name, int argType,
+ void* value, bool readOnly,
+ Algorithm::Getter getter, Algorithm::Setter setter,
+ const string& help=string());
+ string paramHelp(const char* name) const;
+ int paramType(const char* name) const;
+ void getParams(vector<string>& names) const;
+
+ void write(const Algorithm* algo, FileStorage& fs) const;
+ void read(Algorithm* algo, const FileNode& fn) const;
+ string name() const;
+
+ void addParam(Algorithm& algo, const char* name,
+ int& value, bool readOnly=false,
+ int (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(int)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ short& value, bool readOnly=false,
+ int (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(int)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ bool& value, bool readOnly=false,
+ int (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(int)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ double& value, bool readOnly=false,
+ double (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(double)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ string& value, bool readOnly=false,
+ string (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(const string&)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ Mat& value, bool readOnly=false,
+ Mat (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(const Mat&)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ vector<Mat>& value, bool readOnly=false,
+ vector<Mat> (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(const vector<Mat>&)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ Ptr<Algorithm>& value, bool readOnly=false,
+ Ptr<Algorithm> (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(const Ptr<Algorithm>&)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ float& value, bool readOnly=false,
+ float (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(float)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ unsigned int& value, bool readOnly=false,
+ unsigned int (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(unsigned int)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ uint64& value, bool readOnly=false,
+ uint64 (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(uint64)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
+ uchar& value, bool readOnly=false,
+ uchar (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(uchar)=0,
+ const string& help=string());
+ template<typename _Tp, typename _Base> void addParam(Algorithm& algo, const char* name,
+ Ptr<_Tp>& value, bool readOnly=false,
+ Ptr<_Tp> (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
+ const string& help=string());
+ template<typename _Tp> void addParam(Algorithm& algo, const char* name,
+ Ptr<_Tp>& value, bool readOnly=false,
+ Ptr<_Tp> (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
+ const string& help=string());
+protected:
+ AlgorithmInfoData* data; //!< opaque implementation data
+ void set(Algorithm* algo, const char* name, int argType,
+ const void* value, bool force=false) const;
+};
+
+
+struct CV_EXPORTS Param
+{
+ enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, SHORT=10, UCHAR=11 };
+
+ Param();
+ Param(int _type, bool _readonly, int _offset,
+ Algorithm::Getter _getter=0,
+ Algorithm::Setter _setter=0,
+ const string& _help=string());
+ int type; //!< one of the type constants declared above
+ int offset; //!< the _offset value passed to the constructor
+ bool readonly; //!< true if the parameter is read-only
+ Algorithm::Getter getter; //!< optional getter member-function pointer
+ Algorithm::Setter setter; //!< optional setter member-function pointer
+ string help; //!< the help text
+};
+
+template<> struct ParamType<bool>
+{
+ typedef bool const_param_type;
+ typedef bool member_type;
+
+ enum { type = Param::BOOLEAN };
+};
+
+template<> struct ParamType<int>
+{
+ typedef int const_param_type;
+ typedef int member_type;
+
+ enum { type = Param::INT };
+};
+
+template<> struct ParamType<short>
+{
+ typedef int const_param_type; // note: short parameters are exposed as int
+ typedef int member_type;
+
+ enum { type = Param::SHORT };
+};
+
+template<> struct ParamType<double>
+{
+ typedef double const_param_type;
+ typedef double member_type;
+
+ enum { type = Param::REAL };
+};
+
+template<> struct ParamType<string>
+{
+ typedef const string& const_param_type;
+ typedef string member_type;
+
+ enum { type = Param::STRING };
+};
+
+template<> struct ParamType<Mat>
+{
+ typedef const Mat& const_param_type;
+ typedef Mat member_type;
+
+ enum { type = Param::MAT };
+};
+
+template<> struct ParamType<vector<Mat> >
+{
+ typedef const vector<Mat>& const_param_type;
+ typedef vector<Mat> member_type;
+
+ enum { type = Param::MAT_VECTOR };
+};
+
+template<> struct ParamType<Algorithm>
+{
+ typedef const Ptr<Algorithm>& const_param_type;
+ typedef Ptr<Algorithm> member_type;
+
+ enum { type = Param::ALGORITHM };
+};
+
+template<> struct ParamType<float>
+{
+ typedef float const_param_type;
+ typedef float member_type;
+
+ enum { type = Param::FLOAT };
+};
+
+template<> struct ParamType<unsigned>
+{
+ typedef unsigned const_param_type;
+ typedef unsigned member_type;
+
+ enum { type = Param::UNSIGNED_INT };
+};
+
+template<> struct ParamType<uint64>
+{
+ typedef uint64 const_param_type;
+ typedef uint64 member_type;
+
+ enum { type = Param::UINT64 };
+};
+
+template<> struct ParamType<uchar>
+{
+ typedef uchar const_param_type;
+ typedef uchar member_type;
+
+ enum { type = Param::UCHAR };
+};
+
+/*!
+"\nThe CommandLineParser class is designed for command line arguments parsing\n"
+ "Keys map: \n"
+ "Before you start to work with CommandLineParser you have to create a map for keys.\n"
+ " It will look like this\n"
+ " const char* keys =\n"
+ " {\n"
+ " { s| string| 123asd |string parameter}\n"
+ " { d| digit | 100 |digit parameter }\n"
+ " { c|noCamera|false |without camera }\n"
+ " { 1| |some text|help }\n"
+ " { 2| |333 |another help }\n"
+ " };\n"
+ "Usage syntax: \n"
+ " \"{\" - start of parameter string.\n"
+ " \"}\" - end of parameter string\n"
+ " \"|\" - separator between short name, full name, default value and help\n"
+ "Supported syntax: \n"
+ " --key1=arg1 <If a key with '--' must has an argument\n"
+ " you have to assign it through '=' sign.> \n"
+ "<If the key with '--' doesn't have any argument, it means that it is a bool key>\n"
+ " -key2=arg2 <If a key with '-' must has an argument \n"
+ " you have to assign it through '=' sign.> \n"
+ "If the key with '-' doesn't have any argument, it means that it is a bool key\n"
+ " key3 <This key can't has any parameter> \n"
+ "Usage: \n"
+ " Imagine that the input parameters are next:\n"
+ " -s=string_value --digit=250 --noCamera lena.jpg 10000\n"
+ " CommandLineParser parser(argc, argv, keys) - create a parser object\n"
+ " parser.get<string>(\"s\" or \"string\") will return you first parameter value\n"
+ " parser.get<string>(\"s\", false or \"string\", false) will return you first parameter value\n"
+ " without spaces in end and begin\n"
+ " parser.get<int>(\"d\" or \"digit\") will return you second parameter value.\n"
+ " It also works with 'unsigned int', 'double', and 'float' types>\n"
+ " parser.get<bool>(\"c\" or \"noCamera\") will return you true .\n"
+ " If you enter this key in commandline>\n"
+ " It return you false otherwise.\n"
+ " parser.get<string>(\"1\") will return you the first argument without parameter (lena.jpg) \n"
+ " parser.get<int>(\"2\") will return you the second argument without parameter (10000)\n"
+ " It also works with 'unsigned int', 'double', and 'float' types \n"
+*/
+class CV_EXPORTS CommandLineParser
+{
+ public:
+
+ //! the default constructor
+ CommandLineParser(int argc, const char* const argv[], const char* key_map);
+
+ //! get parameter, you can choose: delete spaces in end and begin or not
+ template<typename _Tp>
+ _Tp get(const std::string& name, bool space_delete=true)
+ {
+ if (!has(name))
+ {
+ return _Tp();
+ }
+ std::string str = getString(name);
+ return analyzeValue<_Tp>(str, space_delete);
+ }
+
+ //! print short name, full name, current value and help for all params
+ void printParams();
+
+ protected:
+ std::map<std::string, std::vector<std::string> > data;
+ std::string getString(const std::string& name);
+
+ bool has(const std::string& keys);
+
+ template<typename _Tp>
+ _Tp analyzeValue(const std::string& str, bool space_delete=false);
+
+ template<typename _Tp>
+ static _Tp getData(const std::string& str)
+ {
+ _Tp res = _Tp();
+ std::stringstream s1(str);
+ s1 >> res;
+ return res;
+ }
+
+ template<typename _Tp>
+ _Tp fromStringNumber(const std::string& str);//the default conversion function for numbers
+
+ };
+
+template<> CV_EXPORTS
+bool CommandLineParser::get<bool>(const std::string& name, bool space_delete);
+
+template<> CV_EXPORTS
+std::string CommandLineParser::analyzeValue<std::string>(const std::string& str, bool space_delete);
+
+template<> CV_EXPORTS
+int CommandLineParser::analyzeValue<int>(const std::string& str, bool space_delete);
+
+template<> CV_EXPORTS
+unsigned int CommandLineParser::analyzeValue<unsigned int>(const std::string& str, bool space_delete);
+
+template<> CV_EXPORTS
+uint64 CommandLineParser::analyzeValue<uint64>(const std::string& str, bool space_delete);
+
+template<> CV_EXPORTS
+float CommandLineParser::analyzeValue<float>(const std::string& str, bool space_delete);
+
+template<> CV_EXPORTS
+double CommandLineParser::analyzeValue<double>(const std::string& str, bool space_delete);
+
+
+/////////////////////////////// Parallel Primitives //////////////////////////////////
+
+// a base body class
+class CV_EXPORTS ParallelLoopBody
+{
+public:
+ virtual ~ParallelLoopBody();
+ virtual void operator() (const Range& range) const = 0;
+};
+
+CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.);
+
+/////////////////////////// Synchronization Primitives ///////////////////////////////
+
+class CV_EXPORTS Mutex
+{
+public:
+ Mutex();
+ ~Mutex();
+ Mutex(const Mutex& m);
+ Mutex& operator = (const Mutex& m);
+
+ void lock();
+ bool trylock();
+ void unlock();
+
+ struct Impl;
+protected:
+ Impl* impl;
+};
+
+class CV_EXPORTS AutoLock
+{
+public:
+ AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); }
+ ~AutoLock() { mutex->unlock(); }
+protected:
+ Mutex* mutex;
+private:
+ AutoLock(const AutoLock&);
+ AutoLock& operator = (const AutoLock&);
+};
+
+class TLSDataContainer
+{
+private:
+ int key_;
+protected:
+ CV_EXPORTS TLSDataContainer();
+ CV_EXPORTS ~TLSDataContainer(); // virtual is not required
+public:
+ virtual void* createDataInstance() const = 0;
+ virtual void deleteDataInstance(void* data) const = 0;
+
+ CV_EXPORTS void* getData() const;
+};
+
+template <typename T>
+class TLSData : protected TLSDataContainer
+{
+public:
+ inline TLSData() {}
+ inline ~TLSData() {}
+ inline T* get() const { return (T*)getData(); }
+private:
+ virtual void* createDataInstance() const { return new T; }
+ virtual void deleteDataInstance(void* data) const { delete (T*)data; }
+};
+
+}
+
+#endif // __cplusplus
+
+#include "opencv2/core/operations.hpp"
+#include "opencv2/core/mat.hpp"
+
+#endif /*__OPENCV_CORE_HPP__*/
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h
new file mode 100644
index 00000000..b9f1090a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/core_c.h
@@ -0,0 +1,1886 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+
+#ifndef __OPENCV_CORE_C_H__
+#define __OPENCV_CORE_C_H__
+
+#include "opencv2/core/types_c.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************************\
+* Array allocation, deallocation, initialization and access to elements *
+\****************************************************************************************/
+
+/* <malloc> wrapper.
+ If there is no enough memory, the function
+ (as well as other OpenCV functions that call cvAlloc)
+ raises an error. */
+CVAPI(void*) cvAlloc( size_t size );
+
+/* <free> wrapper.
+ Here and further all the memory releasing functions
+ (that all call cvFree) take double pointer in order to
+ to clear pointer to the data after releasing it.
+ Passing pointer to NULL pointer is Ok: nothing happens in this case
+*/
+CVAPI(void) cvFree_( void* ptr );
+#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0)
+
+/* Allocates and initializes IplImage header */
+CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels );
+
+/* Inializes IplImage header */
+CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth,
+ int channels, int origin CV_DEFAULT(0),
+ int align CV_DEFAULT(4));
+
+/* Creates IPL image (header and data) */
+CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels );
+
+/* Releases (i.e. deallocates) IPL image header */
+CVAPI(void) cvReleaseImageHeader( IplImage** image );
+
+/* Releases IPL image header and data */
+CVAPI(void) cvReleaseImage( IplImage** image );
+
+/* Creates a copy of IPL image (widthStep may differ) */
+CVAPI(IplImage*) cvCloneImage( const IplImage* image );
+
+/* Sets a Channel Of Interest (only a few functions support COI) -
+ use cvCopy to extract the selected channel and/or put it back */
+CVAPI(void) cvSetImageCOI( IplImage* image, int coi );
+
+/* Retrieves image Channel Of Interest */
+CVAPI(int) cvGetImageCOI( const IplImage* image );
+
+/* Sets image ROI (region of interest) (COI is not changed) */
+CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect );
+
+/* Resets image ROI and COI */
+CVAPI(void) cvResetImageROI( IplImage* image );
+
+/* Retrieves image ROI */
+CVAPI(CvRect) cvGetImageROI( const IplImage* image );
+
+/* Allocates and initializes CvMat header */
+CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type );
+
+#define CV_AUTOSTEP 0x7fffffff
+
+/* Initializes CvMat header */
+CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols,
+ int type, void* data CV_DEFAULT(NULL),
+ int step CV_DEFAULT(CV_AUTOSTEP) );
+
+/* Allocates and initializes CvMat header and allocates data */
+CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type );
+
+/* Releases CvMat header and deallocates matrix data
+ (reference counting is used for data) */
+CVAPI(void) cvReleaseMat( CvMat** mat );
+
+/* Decrements CvMat data reference counter and deallocates the data if
+ it reaches 0 */
+CV_INLINE void cvDecRefData( CvArr* arr )
+{
+ if( CV_IS_MAT( arr ))
+ {
+ CvMat* mat = (CvMat*)arr;
+ mat->data.ptr = NULL;
+ if( mat->refcount != NULL && --*mat->refcount == 0 )
+ cvFree( &mat->refcount );
+ mat->refcount = NULL;
+ }
+ else if( CV_IS_MATND( arr ))
+ {
+ CvMatND* mat = (CvMatND*)arr;
+ mat->data.ptr = NULL;
+ if( mat->refcount != NULL && --*mat->refcount == 0 )
+ cvFree( &mat->refcount );
+ mat->refcount = NULL;
+ }
+}
+
+/* Increments CvMat data reference counter */
+CV_INLINE int cvIncRefData( CvArr* arr )
+{
+ int refcount = 0;
+ if( CV_IS_MAT( arr ))
+ {
+ CvMat* mat = (CvMat*)arr;
+ if( mat->refcount != NULL )
+ refcount = ++*mat->refcount;
+ }
+ else if( CV_IS_MATND( arr ))
+ {
+ CvMatND* mat = (CvMatND*)arr;
+ if( mat->refcount != NULL )
+ refcount = ++*mat->refcount;
+ }
+ return refcount;
+}
+
+
+/* Creates an exact copy of the input matrix (except, may be, step value) */
+CVAPI(CvMat*) cvCloneMat( const CvMat* mat );
+
+
+/* Makes a new matrix from <rect> subrectangle of input array.
+ No data is copied */
+CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect );
+#define cvGetSubArr cvGetSubRect
+
+/* Selects row span of the input array: arr(start_row:delta_row:end_row,:)
+ (end_row is not included into the span). */
+CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat,
+ int start_row, int end_row,
+ int delta_row CV_DEFAULT(1));
+
+CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row )
+{
+ return cvGetRows( arr, submat, row, row + 1, 1 );
+}
+
+
+/* Selects column span of the input array: arr(:,start_col:end_col)
+ (end_col is not included into the span) */
+CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat,
+ int start_col, int end_col );
+
+CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col )
+{
+ return cvGetCols( arr, submat, col, col + 1 );
+}
+
+/* Select a diagonal of the input array.
+ (diag = 0 means the main diagonal, >0 means a diagonal above the main one,
+ <0 - below the main one).
+ The diagonal will be represented as a column (nx1 matrix). */
+CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat,
+ int diag CV_DEFAULT(0));
+
+/* low-level scalar <-> raw data conversion functions */
+CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type,
+ int extend_to_12 CV_DEFAULT(0) );
+
+CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar );
+
+/* Allocates and initializes CvMatND header */
+CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type );
+
+/* Allocates and initializes CvMatND header and allocates data */
+CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type );
+
+/* Initializes preallocated CvMatND header */
+CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes,
+ int type, void* data CV_DEFAULT(NULL) );
+
+/* Releases CvMatND */
+CV_INLINE void cvReleaseMatND( CvMatND** mat )
+{
+ cvReleaseMat( (CvMat**)mat );
+}
+
+/* Creates a copy of CvMatND (except, may be, steps) */
+CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat );
+
+/* Allocates and initializes CvSparseMat header and allocates data */
+CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type );
+
+/* Releases CvSparseMat */
+CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat );
+
+/* Creates a copy of CvSparseMat (except, may be, zero items) */
+CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat );
+
+/* Initializes sparse array iterator
+ (returns the first node or NULL if the array is empty) */
+CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat,
+ CvSparseMatIterator* mat_iterator );
+
+// returns next sparse array node (or NULL if there is no more nodes)
+CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator )
+{
+ if( mat_iterator->node->next )
+ return mat_iterator->node = mat_iterator->node->next;
+ else
+ {
+ int idx;
+ for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ )
+ {
+ CvSparseNode* node = (CvSparseNode*)mat_iterator->mat->hashtable[idx];
+ if( node )
+ {
+ mat_iterator->curidx = idx;
+ return mat_iterator->node = node;
+ }
+ }
+ return NULL;
+ }
+}
+
+/**************** matrix iterator: used for n-ary operations on dense arrays *********/
+
+#define CV_MAX_ARR 10
+
+typedef struct CvNArrayIterator
+{
+ int count; /* number of arrays */
+ int dims; /* number of dimensions to iterate */
+ CvSize size; /* maximal common linear size: { width = size, height = 1 } */
+ uchar* ptr[CV_MAX_ARR]; /* pointers to the array slices */
+ int stack[CV_MAX_DIM]; /* for internal use */
+ CvMatND* hdr[CV_MAX_ARR]; /* pointers to the headers of the
+ matrices that are processed */
+}
+CvNArrayIterator;
+
+#define CV_NO_DEPTH_CHECK 1
+#define CV_NO_CN_CHECK 2
+#define CV_NO_SIZE_CHECK 4
+
+/* initializes iterator that traverses through several arrays simulteneously
+ (the function together with cvNextArraySlice is used for
+ N-ari element-wise operations) */
+CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs,
+ const CvArr* mask, CvMatND* stubs,
+ CvNArrayIterator* array_iterator,
+ int flags CV_DEFAULT(0) );
+
+/* returns zero value if iteration is finished, non-zero (slice length) otherwise */
+CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator );
+
+
+/* Returns type of array elements:
+ CV_8UC1 ... CV_64FC4 ... */
+CVAPI(int) cvGetElemType( const CvArr* arr );
+
+/* Retrieves number of an array dimensions and
+ optionally sizes of the dimensions */
+CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) );
+
+
+/* Retrieves size of a particular array dimension.
+ For 2d arrays cvGetDimSize(arr,0) returns number of rows (image height)
+ and cvGetDimSize(arr,1) returns number of columns (image width) */
+CVAPI(int) cvGetDimSize( const CvArr* arr, int index );
+
+
+/* ptr = &arr(idx0,idx1,...). All indexes are zero-based,
+ the major dimensions go first (e.g. (y,x) for 2D, (z,y,x) for 3D */
+CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL));
+CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) );
+CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2,
+ int* type CV_DEFAULT(NULL));
+
+/* For CvMat or IplImage number of indices should be 2
+ (row index (y) goes first, column index (x) goes next).
+ For CvMatND or CvSparseMat number of infices should match number of <dims> and
+ indices order should match the array dimension order. */
+CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL),
+ int create_node CV_DEFAULT(1),
+ unsigned* precalc_hashval CV_DEFAULT(NULL));
+
+/* value = arr(idx0,idx1,...) */
+CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 );
+CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 );
+CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 );
+CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx );
+
+/* for 1-channel arrays */
+CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 );
+CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 );
+CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 );
+CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx );
+
+/* arr(idx0,idx1,...) = value */
+CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value );
+CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value );
+CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value );
+CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value );
+
+/* for 1-channel arrays */
+CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value );
+CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value );
+CVAPI(void) cvSetReal3D( CvArr* arr, int idx0,
+ int idx1, int idx2, double value );
+CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value );
+
+/* clears element of ND dense array,
+ in case of sparse arrays it deletes the specified node */
+CVAPI(void) cvClearND( CvArr* arr, const int* idx );
+
+/* Converts CvArr (IplImage or CvMat,...) to CvMat.
+ If the last parameter is non-zero, function can
+ convert multi(>2)-dimensional array to CvMat as long as
+ the last array's dimension is continous. The resultant
+ matrix will be have appropriate (a huge) number of rows */
+CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header,
+ int* coi CV_DEFAULT(NULL),
+ int allowND CV_DEFAULT(0));
+
+/* Converts CvArr (IplImage or CvMat) to IplImage */
+CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header );
+
+
+/* Changes a shape of multi-dimensional array.
+ new_cn == 0 means that number of channels remains unchanged.
+ new_dims == 0 means that number and sizes of dimensions remain the same
+ (unless they need to be changed to set the new number of channels)
+ if new_dims == 1, there is no need to specify new dimension sizes
+ The resultant configuration should be achievable w/o data copying.
+ If the resultant array is sparse, CvSparseMat header should be passed
+ to the function else if the result is 1 or 2 dimensional,
+ CvMat header should be passed to the function
+ else CvMatND header should be passed */
+CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr,
+ int sizeof_header, CvArr* header,
+ int new_cn, int new_dims, int* new_sizes );
+
+#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \
+ cvReshapeMatND( (arr), sizeof(*(header)), (header), \
+ (new_cn), (new_dims), (new_sizes))
+
+CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header,
+ int new_cn, int new_rows CV_DEFAULT(0) );
+
+/* Repeats source 2d array several times in both horizontal and
+ vertical direction to fill destination array */
+CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst );
+
+/* Allocates array data */
+CVAPI(void) cvCreateData( CvArr* arr );
+
+/* Releases array data */
+CVAPI(void) cvReleaseData( CvArr* arr );
+
+/* Attaches user data to the array header. The step is reffered to
+ the pre-last dimension. That is, all the planes of the array
+ must be joint (w/o gaps) */
+CVAPI(void) cvSetData( CvArr* arr, void* data, int step );
+
+/* Retrieves raw data of CvMat, IplImage or CvMatND.
+ In the latter case the function raises an error if
+ the array can not be represented as a matrix */
+CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data,
+ int* step CV_DEFAULT(NULL),
+ CvSize* roi_size CV_DEFAULT(NULL));
+
+/* Returns width and height of array in elements */
+CVAPI(CvSize) cvGetSize( const CvArr* arr );
+
+/* Copies source array to destination array */
+CVAPI(void) cvCopy( const CvArr* src, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Sets all or "masked" elements of input array
+ to the same value*/
+CVAPI(void) cvSet( CvArr* arr, CvScalar value,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Clears all the array elements (sets them to 0) */
+CVAPI(void) cvSetZero( CvArr* arr );
+#define cvZero cvSetZero
+
+
+/* Splits a multi-channel array into the set of single-channel arrays or
+ extracts particular [color] plane */
+CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1,
+ CvArr* dst2, CvArr* dst3 );
+
+/* Merges a set of single-channel arrays into the single multi-channel array
+ or inserts one particular [color] plane to the array */
+CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1,
+ const CvArr* src2, const CvArr* src3,
+ CvArr* dst );
+
+/* Copies several channels from input arrays to
+ certain channels of output arrays */
+CVAPI(void) cvMixChannels( const CvArr** src, int src_count,
+ CvArr** dst, int dst_count,
+ const int* from_to, int pair_count );
+
+/* Performs linear transformation on every source array element:
+ dst(x,y,c) = scale*src(x,y,c)+shift.
+ Arbitrary combination of input and output array depths are allowed
+ (number of channels must be the same), thus the function can be used
+ for type conversion */
+CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst,
+ double scale CV_DEFAULT(1),
+ double shift CV_DEFAULT(0) );
+#define cvCvtScale cvConvertScale
+#define cvScale cvConvertScale
+#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 )
+
+
+/* Performs linear transformation on every source array element,
+ stores absolute value of the result:
+ dst(x,y,c) = abs(scale*src(x,y,c)+shift).
+ destination array must have 8u type.
+ In other cases one may use cvConvertScale + cvAbsDiffS */
+CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst,
+ double scale CV_DEFAULT(1),
+ double shift CV_DEFAULT(0) );
+#define cvCvtScaleAbs cvConvertScaleAbs
+
+
+/* checks termination criteria validity and
+ sets eps to default_eps (if it is not set),
+ max_iter to default_max_iters (if it is not set)
+*/
+CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria,
+ double default_eps,
+ int default_max_iters );
+
+/****************************************************************************************\
+* Arithmetic, logic and comparison operations *
+\****************************************************************************************/
+
+/* dst(mask) = src1(mask) + src2(mask) */
+CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(mask) = src(mask) + value */
+CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(mask) = src1(mask) - src2(mask) */
+CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(mask) = src(mask) - value = src(mask) + (-value) */
+CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL))
+{
+ cvAddS( src, cvScalar( -value.val[0], -value.val[1], -value.val[2], -value.val[3]),
+ dst, mask );
+}
+
+/* dst(mask) = value - src(mask) */
+CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst,
+ const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(idx) = src1(idx) * src2(idx) * scale
+ (scaled element-wise multiplication of 2 arrays) */
+CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, double scale CV_DEFAULT(1) );
+
+/* element-wise division/inversion with scaling:
+ dst(idx) = src1(idx) * scale / src2(idx)
+ or dst(idx) = scale / src2(idx) if src1 == 0 */
+CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, double scale CV_DEFAULT(1));
+
+/* dst = src1 * scale + src2 */
+CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale,
+ const CvArr* src2, CvArr* dst );
+#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C)
+
+/* dst = src1 * alpha + src2 * beta + gamma */
+CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha,
+ const CvArr* src2, double beta,
+ double gamma, CvArr* dst );
+
+/* result = sum_i(src1(i) * src2(i)) (results for all channels are accumulated together) */
+CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 );
+
+/* dst(idx) = src1(idx) & src2(idx) */
+CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(idx) = src(idx) & value */
+CVAPI(void) cvAndS( const CvArr* src, CvScalar value,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(idx) = src1(idx) | src2(idx) */
+CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(idx) = src(idx) | value */
+CVAPI(void) cvOrS( const CvArr* src, CvScalar value,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(idx) = src1(idx) ^ src2(idx) */
+CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(idx) = src(idx) ^ value */
+CVAPI(void) cvXorS( const CvArr* src, CvScalar value,
+ CvArr* dst, const CvArr* mask CV_DEFAULT(NULL));
+
+/* dst(idx) = ~src(idx) */
+CVAPI(void) cvNot( const CvArr* src, CvArr* dst );
+
+/* dst(idx) = lower(idx) <= src(idx) < upper(idx) */
+CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower,
+ const CvArr* upper, CvArr* dst );
+
+/* dst(idx) = lower <= src(idx) < upper */
+CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower,
+ CvScalar upper, CvArr* dst );
+
+#define CV_CMP_EQ 0
+#define CV_CMP_GT 1
+#define CV_CMP_GE 2
+#define CV_CMP_LT 3
+#define CV_CMP_LE 4
+#define CV_CMP_NE 5
+
+/* The comparison operation support single-channel arrays only.
+ Destination image should be 8uC1 or 8sC1 */
+
+/* dst(idx) = src1(idx) _cmp_op_ src2(idx) */
+CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op );
+
+/* dst(idx) = src1(idx) _cmp_op_ value */
+CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op );
+
+/* dst(idx) = min(src1(idx),src2(idx)) */
+CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/* dst(idx) = max(src1(idx),src2(idx)) */
+CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/* dst(idx) = min(src(idx),value) */
+CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst );
+
+/* dst(idx) = max(src(idx),value) */
+CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst );
+
+/* dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */
+CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/* dst(x,y,c) = abs(src(x,y,c) - value(c)) */
+CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value );
+#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0))
+
+/****************************************************************************************\
+* Math operations *
+\****************************************************************************************/
+
+/* Does cartesian->polar coordinates conversion.
+ Either of output components (magnitude or angle) is optional */
+CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y,
+ CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL),
+ int angle_in_degrees CV_DEFAULT(0));
+
+/* Does polar->cartesian coordinates conversion.
+ Either of output components (magnitude or angle) is optional.
+ If magnitude is missing it is assumed to be all 1's */
+CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle,
+ CvArr* x, CvArr* y,
+ int angle_in_degrees CV_DEFAULT(0));
+
+/* Does powering: dst(idx) = src(idx)^power */
+CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power );
+
+/* Does exponention: dst(idx) = exp(src(idx)).
+ Overflow is not handled yet. Underflow is handled.
+ Maximal relative error is ~7e-6 for single-precision input */
+CVAPI(void) cvExp( const CvArr* src, CvArr* dst );
+
+/* Calculates natural logarithms: dst(idx) = log(abs(src(idx))).
+ Logarithm of 0 gives large negative number(~-700)
+ Maximal relative error is ~3e-7 for single-precision output
+*/
+CVAPI(void) cvLog( const CvArr* src, CvArr* dst );
+
+/* Fast arctangent calculation */
+CVAPI(float) cvFastArctan( float y, float x );
+
+/* Fast cubic root calculation */
+CVAPI(float) cvCbrt( float value );
+
+/* Checks array values for NaNs, Infs or simply for too large numbers
+ (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set,
+ no runtime errors is raised (function returns zero value in case of "bad" values).
+ Otherwise cvError is called */
+#define CV_CHECK_RANGE 1
+#define CV_CHECK_QUIET 2
+CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0),
+ double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0));
+#define cvCheckArray cvCheckArr
+
+#define CV_RAND_UNI 0
+#define CV_RAND_NORMAL 1
+CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type,
+ CvScalar param1, CvScalar param2 );
+
+CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng,
+ double iter_factor CV_DEFAULT(1.));
+
+#define CV_SORT_EVERY_ROW 0
+#define CV_SORT_EVERY_COLUMN 1
+#define CV_SORT_ASCENDING 0
+#define CV_SORT_DESCENDING 16
+
+CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL),
+ CvArr* idxmat CV_DEFAULT(NULL),
+ int flags CV_DEFAULT(0));
+
+/* Finds real roots of a cubic equation */
+CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots );
+
+/* Finds all real and complex roots of a polynomial equation */
+CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2,
+ int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100));
+
+/****************************************************************************************\
+* Matrix operations *
+\****************************************************************************************/
+
+/* Calculates cross product of two 3d vectors */
+CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst );
+
+/* Matrix transform: dst = A*B + C, C is optional */
+#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 )
+#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst))
+
+#define CV_GEMM_A_T 1
+#define CV_GEMM_B_T 2
+#define CV_GEMM_C_T 4
+/* Extended matrix transform:
+ dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */
+CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha,
+ const CvArr* src3, double beta, CvArr* dst,
+ int tABC CV_DEFAULT(0));
+#define cvMatMulAddEx cvGEMM
+
+/* Transforms each element of source array and stores
+ resultant vectors in destination array */
+CVAPI(void) cvTransform( const CvArr* src, CvArr* dst,
+ const CvMat* transmat,
+ const CvMat* shiftvec CV_DEFAULT(NULL));
+#define cvMatMulAddS cvTransform
+
+/* Does perspective transform on every element of input array */
+CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst,
+ const CvMat* mat );
+
+/* Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */
+CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order,
+ const CvArr* delta CV_DEFAULT(NULL),
+ double scale CV_DEFAULT(1.) );
+
+/* Transposes matrix. Square matrices can be transposed in-place */
+CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst );
+#define cvT cvTranspose
+
+/* Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */
+CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) );
+
+/* Mirror array data around horizontal (flip=0),
+ vertical (flip=1) or both (flip=-1) axes:
+ cvFlip(src) flips images vertically and sequences horizontally (inplace) */
+CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL),
+ int flip_mode CV_DEFAULT(0));
+#define cvMirror cvFlip
+
+
+#define CV_SVD_MODIFY_A 1
+#define CV_SVD_U_T 2
+#define CV_SVD_V_T 4
+
+/* Performs Singular Value Decomposition of a matrix */
+CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL),
+ CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0));
+
+/* Performs Singular Value Back Substitution (solves A*X = B):
+ flags must be the same as in cvSVD */
+CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U,
+ const CvArr* V, const CvArr* B,
+ CvArr* X, int flags );
+
+#define CV_LU 0
+#define CV_SVD 1
+#define CV_SVD_SYM 2
+#define CV_CHOLESKY 3
+#define CV_QR 4
+#define CV_NORMAL 16
+
+/* Inverts matrix */
+CVAPI(double) cvInvert( const CvArr* src, CvArr* dst,
+ int method CV_DEFAULT(CV_LU));
+#define cvInv cvInvert
+
+/* Solves linear system (src1)*(dst) = (src2)
+ (returns 0 if src1 is a singular and CV_LU method is used) */
+CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst,
+ int method CV_DEFAULT(CV_LU));
+
+/* Calculates determinant of input matrix */
+CVAPI(double) cvDet( const CvArr* mat );
+
+/* Calculates trace of the matrix (sum of elements on the main diagonal) */
+CVAPI(CvScalar) cvTrace( const CvArr* mat );
+
+/* Finds eigen values and vectors of a symmetric matrix */
+CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals,
+ double eps CV_DEFAULT(0),
+ int lowindex CV_DEFAULT(-1),
+ int highindex CV_DEFAULT(-1));
+
+///* Finds selected eigen values and vectors of a symmetric matrix */
+//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals,
+// int lowindex, int highindex );
+
+/* Makes an identity matrix (mat_ij = i == j) */
+CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) );
+
+/* Fills matrix with given range of numbers */
+CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end );
+
+/* Calculates covariation matrix for a set of vectors */
+/* transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */
+#define CV_COVAR_SCRAMBLED 0
+
+/* [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */
+#define CV_COVAR_NORMAL 1
+
+/* do not calc average (i.e. mean vector) - use the input vector instead
+ (useful for calculating covariance matrix by parts) */
+#define CV_COVAR_USE_AVG 2
+
+/* scale the covariance matrix coefficients by number of the vectors */
+#define CV_COVAR_SCALE 4
+
+/* all the input vectors are stored in a single matrix, as its rows */
+#define CV_COVAR_ROWS 8
+
+/* all the input vectors are stored in a single matrix, as its columns */
+#define CV_COVAR_COLS 16
+
+CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count,
+ CvArr* cov_mat, CvArr* avg, int flags );
+
+#define CV_PCA_DATA_AS_ROW 0
+#define CV_PCA_DATA_AS_COL 1
+#define CV_PCA_USE_AVG 2
+CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean,
+ CvArr* eigenvals, CvArr* eigenvects, int flags );
+
+CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean,
+ const CvArr* eigenvects, CvArr* result );
+
+CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean,
+ const CvArr* eigenvects, CvArr* result );
+
+/* Calculates Mahalanobis(weighted) distance */
+CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat );
+#define cvMahalonobis cvMahalanobis
+
+/****************************************************************************************\
+* Array Statistics *
+\****************************************************************************************/
+
+/* Finds sum of array elements */
+CVAPI(CvScalar) cvSum( const CvArr* arr );
+
+/* Calculates number of non-zero pixels */
+CVAPI(int) cvCountNonZero( const CvArr* arr );
+
+/* Calculates mean value of array elements */
+CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Calculates mean and standard deviation of pixel values */
+CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Finds global minimum, maximum and their positions */
+CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val,
+ CvPoint* min_loc CV_DEFAULT(NULL),
+ CvPoint* max_loc CV_DEFAULT(NULL),
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* types of array norm */
+#define CV_C 1
+#define CV_L1 2
+#define CV_L2 4
+#define CV_NORM_MASK 7
+#define CV_RELATIVE 8
+#define CV_DIFF 16
+#define CV_MINMAX 32
+
+#define CV_DIFF_C (CV_DIFF | CV_C)
+#define CV_DIFF_L1 (CV_DIFF | CV_L1)
+#define CV_DIFF_L2 (CV_DIFF | CV_L2)
+#define CV_RELATIVE_C (CV_RELATIVE | CV_C)
+#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1)
+#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2)
+
+/* Finds norm, difference norm or relative difference norm for an array (or two arrays) */
+CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL),
+ int norm_type CV_DEFAULT(CV_L2),
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst,
+ double a CV_DEFAULT(1.), double b CV_DEFAULT(0.),
+ int norm_type CV_DEFAULT(CV_L2),
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+
+#define CV_REDUCE_SUM 0
+#define CV_REDUCE_AVG 1
+#define CV_REDUCE_MAX 2
+#define CV_REDUCE_MIN 3
+
+CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1),
+ int op CV_DEFAULT(CV_REDUCE_SUM) );
+
+/****************************************************************************************\
+* Discrete Linear Transforms and Related Functions *
+\****************************************************************************************/
+
+#define CV_DXT_FORWARD 0
+#define CV_DXT_INVERSE 1
+#define CV_DXT_SCALE 2 /* divide result by size of array */
+#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE)
+#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE
+#define CV_DXT_ROWS 4 /* transform each row individually */
+#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */
+
+/* Discrete Fourier Transform:
+ complex->complex,
+ real->ccs (forward),
+ ccs->real (inverse) */
+CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags,
+ int nonzero_rows CV_DEFAULT(0) );
+#define cvFFT cvDFT
+
+/* Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) */
+CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2,
+ CvArr* dst, int flags );
+
+/* Finds optimal DFT vector size >= size0 */
+CVAPI(int) cvGetOptimalDFTSize( int size0 );
+
+/* Discrete Cosine Transform */
+CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags );
+
+/****************************************************************************************\
+* Dynamic data structures *
+\****************************************************************************************/
+
+/* Calculates length of sequence slice (with support of negative indices). */
+CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq );
+
+
+/* Creates new memory storage.
+ block_size == 0 means that default,
+ somewhat optimal size, is used (currently, it is 64K) */
+CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0));
+
+
+/* Creates a memory storage that will borrow memory blocks from parent storage */
+CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent );
+
+
+/* Releases memory storage. All the children of a parent must be released before
+ the parent. A child storage returns all the blocks to parent when it is released */
+CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage );
+
+
+/* Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos)
+ to reuse memory allocated for the storage - cvClearSeq,cvClearSet ...
+ do not free any memory.
+ A child storage returns all the blocks to the parent when it is cleared */
+CVAPI(void) cvClearMemStorage( CvMemStorage* storage );
+
+/* Remember a storage "free memory" position */
+CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos );
+
+/* Restore a storage "free memory" position */
+CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos );
+
+/* Allocates continuous buffer of the specified size in the storage */
+CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size );
+
+/* Allocates string in memory storage */
+CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr,
+ int len CV_DEFAULT(-1) );
+
+/* Creates new empty sequence that will reside in the specified storage */
+CVAPI(CvSeq*) cvCreateSeq( int seq_flags, size_t header_size,
+ size_t elem_size, CvMemStorage* storage );
+
+/* Changes default size (granularity) of sequence blocks.
+ The default size is ~1Kbyte */
+CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems );
+
+
+/* Adds new element to the end of sequence. Returns pointer to the element */
+CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL));
+
+
+/* Adds new element to the beginning of sequence. Returns pointer to it */
+CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL));
+
+
+/* Removes the last element from sequence and optionally saves it */
+CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL));
+
+
+/* Removes the first element from sequence and optionally saves it */
+CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL));
+
+
+#define CV_FRONT 1
+#define CV_BACK 0
+/* Adds several new elements to the end of sequence */
+CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements,
+ int count, int in_front CV_DEFAULT(0) );
+
+/* Removes several elements from the end of sequence and optionally saves them */
+CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements,
+ int count, int in_front CV_DEFAULT(0) );
+
+/* Inserts a new element in the middle of sequence.
+ cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */
+CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index,
+ const void* element CV_DEFAULT(NULL));
+
+/* Removes specified sequence element */
+CVAPI(void) cvSeqRemove( CvSeq* seq, int index );
+
+
+/* Removes all the elements from the sequence. The freed memory
+ can be reused later only by the same sequence unless cvClearMemStorage
+ or cvRestoreMemStoragePos is called */
+CVAPI(void) cvClearSeq( CvSeq* seq );
+
+
+/* Retrieves pointer to specified sequence element.
+ Negative indices are supported and mean counting from the end
+ (e.g -1 means the last sequence element) */
+CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index );
+
+/* Calculates index of the specified sequence element.
+ Returns -1 if element does not belong to the sequence */
+CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element,
+ CvSeqBlock** block CV_DEFAULT(NULL) );
+
+/* Initializes sequence writer. The new elements will be added to the end of sequence */
+CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer );
+
+
+/* Combination of cvCreateSeq and cvStartAppendToSeq */
+CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size,
+ int elem_size, CvMemStorage* storage,
+ CvSeqWriter* writer );
+
+/* Closes sequence writer, updates sequence header and returns pointer
+ to the resultant sequence
+ (which may be useful if the sequence was created using cvStartWriteSeq))
+*/
+CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer );
+
+
+/* Updates sequence header. May be useful to get access to some of previously
+ written elements via cvGetSeqElem or sequence reader */
+CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer );
+
+
+/* Initializes sequence reader.
+ The sequence can be read in forward or backward direction */
+CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader,
+ int reverse CV_DEFAULT(0) );
+
+
+/* Returns current sequence reader position (currently observed sequence element) */
+CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader );
+
+
+/* Changes sequence reader position. It may seek to an absolute or
+ to relative to the current position */
+CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index,
+ int is_relative CV_DEFAULT(0));
+
+/* Copies sequence content to a continuous piece of memory */
+CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements,
+ CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) );
+
+/* Creates sequence header for array.
+ After that all the operations on sequences that do not alter the content
+ can be applied to the resultant sequence */
+CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size,
+ int elem_size, void* elements, int total,
+ CvSeq* seq, CvSeqBlock* block );
+
+/* Extracts sequence slice (with or without copying sequence elements) */
+CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice,
+ CvMemStorage* storage CV_DEFAULT(NULL),
+ int copy_data CV_DEFAULT(0));
+
+/* Makes a full copy of the sequence: takes a whole-sequence slice with
+ copy_data = 1, so all elements are duplicated into <storage>
+ (which defaults to NULL, i.e. cvSeqSlice's default placement) */
+CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL))
+{
+ return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 );
+}
+
+/* Removes sequence slice */
+CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice );
+
+/* Inserts a sequence or array into another sequence */
+CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );
+
+/* a < b ? -1 : a > b ? 1 : 0 */
+typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata );
+
+/* Sorts sequence in-place given element comparison function */
+CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) );
+
+/* Finds element in a [sorted] sequence */
+CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func,
+ int is_sorted, int* elem_idx,
+ void* userdata CV_DEFAULT(NULL) );
+
+/* Reverses order of sequence elements in-place */
+CVAPI(void) cvSeqInvert( CvSeq* seq );
+
+/* Splits sequence into one or more equivalence classes using the specified criteria */
+CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage,
+ CvSeq** labels, CvCmpFunc is_equal, void* userdata );
+
+/************ Internal sequence functions ************/
+CVAPI(void) cvChangeSeqBlock( void* reader, int direction );
+CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer );
+
+
+/* Creates a new set */
+CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size,
+ int elem_size, CvMemStorage* storage );
+
+/* Adds new element to the set and returns pointer to it */
+CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL),
+ CvSetElem** inserted_elem CV_DEFAULT(NULL) );
+
+/* Fast variant of cvSetAdd: reuses a node from the set's free-element list
+ when one is available; otherwise delegates to cvSetAdd to obtain a node */
+CV_INLINE CvSetElem* cvSetNew( CvSet* set_header )
+{
+ CvSetElem* elem = set_header->free_elems;
+ if( elem )
+ {
+ set_header->free_elems = elem->next_free;
+ elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; /* keep index bits, clear the free mark */
+ set_header->active_count++;
+ }
+ else
+ cvSetAdd( set_header, NULL, &elem );
+ return elem;
+}
+
+/* Removes set element given its pointer: marks the node free
+ (CV_SET_ELEM_FREE_FLAG) and pushes it onto the set's free-element list.
+ The assert rejects nodes that already look free (non-negative flags are
+ expected for occupied nodes — the free flag appears to be the sign bit) */
+CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem )
+{
+ CvSetElem* _elem = (CvSetElem*)elem;
+ assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ );
+ _elem->next_free = set_header->free_elems;
+ _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG;
+ set_header->free_elems = _elem;
+ set_header->active_count--;
+}
+
+/* Removes element from the set by its index */
+CVAPI(void) cvSetRemove( CvSet* set_header, int index );
+
+/* Returns a set element by index. If the element doesn't belong to the set,
+ NULL is returned. Implementation: fetch the slot through cvGetSeqElem
+ (sets are sequence-backed), then filter out free/invalid slots with
+ CV_IS_SET_ELEM */
+CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx )
+{
+ CvSetElem* elem = (CvSetElem*)(void *)cvGetSeqElem( (CvSeq*)set_header, idx );
+ return elem && CV_IS_SET_ELEM( elem ) ? elem : 0;
+}
+
+/* Removes all the elements from the set */
+CVAPI(void) cvClearSet( CvSet* set_header );
+
+/* Creates new graph */
+CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size,
+ int vtx_size, int edge_size,
+ CvMemStorage* storage );
+
+/* Adds new vertex to the graph */
+CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL),
+ CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) );
+
+
+/* Removes vertex from the graph together with all incident edges */
+CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index );
+CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx );
+
+
+/* Link two vertices specified by indices or pointers if they
+ are not connected or return pointer to already existing edge
+ connecting the vertices.
+ Functions return 1 if a new edge was created, 0 otherwise */
+CVAPI(int) cvGraphAddEdge( CvGraph* graph,
+ int start_idx, int end_idx,
+ const CvGraphEdge* edge CV_DEFAULT(NULL),
+ CvGraphEdge** inserted_edge CV_DEFAULT(NULL) );
+
+CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph,
+ CvGraphVtx* start_vtx, CvGraphVtx* end_vtx,
+ const CvGraphEdge* edge CV_DEFAULT(NULL),
+ CvGraphEdge** inserted_edge CV_DEFAULT(NULL) );
+
+/* Remove edge connecting two vertices */
+CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx );
+CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx,
+ CvGraphVtx* end_vtx );
+
+/* Find edge connecting two vertices */
+CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx );
+CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph,
+ const CvGraphVtx* start_vtx,
+ const CvGraphVtx* end_vtx );
+#define cvGraphFindEdge cvFindGraphEdge
+#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr
+
+/* Remove all vertices and edges from the graph */
+CVAPI(void) cvClearGraph( CvGraph* graph );
+
+
+/* Count number of edges incident to the vertex */
+CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int vtx_idx );
+CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx );
+
+
+/* Retrieves graph vertex by given index */
+#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx))
+
+/* Retrieves index of a graph vertex given its pointer */
+#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK)
+
+/* Retrieves index of a graph edge given its pointer */
+#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK)
+
+#define cvGraphGetVtxCount( graph ) ((graph)->active_count)
+#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count)
+
+#define CV_GRAPH_VERTEX 1
+#define CV_GRAPH_TREE_EDGE 2
+#define CV_GRAPH_BACK_EDGE 4
+#define CV_GRAPH_FORWARD_EDGE 8
+#define CV_GRAPH_CROSS_EDGE 16
+#define CV_GRAPH_ANY_EDGE 30
+#define CV_GRAPH_NEW_TREE 32
+#define CV_GRAPH_BACKTRACKING 64
+#define CV_GRAPH_OVER -1
+
+#define CV_GRAPH_ALL_ITEMS -1
+
+/* flags for graph vertices and edges */
+#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30)
+#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \
+ (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG)
+#define CV_IS_GRAPH_EDGE_VISITED(edge) \
+ (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG)
+#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29)
+#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28)
+
+/* Graph-traversal state consumed by cvCreateGraphScanner /
+ cvNextGraphItem / cvReleaseGraphScanner (declared below) */
+typedef struct CvGraphScanner
+{
+ CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */
+ CvGraphVtx* dst; /* current graph edge destination vertex */
+ CvGraphEdge* edge; /* current edge */
+
+ CvGraph* graph; /* the graph */
+ CvSeq* stack; /* the graph vertex stack */
+ int index; /* the lower bound of certainly visited vertices */
+ int mask; /* event mask */
+}
+CvGraphScanner;
+
+/* Creates new graph scanner. */
+CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph,
+ CvGraphVtx* vtx CV_DEFAULT(NULL),
+ int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS));
+
+/* Releases graph scanner. */
+CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner );
+
+/* Get next graph element */
+CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner );
+
+/* Creates a copy of graph */
+CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage );
+
+/****************************************************************************************\
+* Drawing *
+\****************************************************************************************/
+
+/****************************************************************************************\
+* Drawing functions work with images/matrices of arbitrary type. *
+* For color images the channel order is BGR[A] *
+* Antialiasing is supported only for 8-bit image now. *
+* All the functions include parameter color that means rgb value (that may be *
+* constructed with CV_RGB macro) for color images and brightness *
+* for grayscale images. *
+* If a drawn figure is partially or completely outside of the image, it is clipped.*
+\****************************************************************************************/
+
+#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 )
+#define CV_FILLED -1
+
+#define CV_AA 16
+
+/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */
+CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2,
+ CvScalar color, int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );
+
+/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2),
+ if thickness<0 (e.g. thickness == CV_FILLED), the filled box is drawn */
+CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2,
+ CvScalar color, int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8),
+ int shift CV_DEFAULT(0));
+
+/* Draws a rectangle specified by a CvRect structure */
+CVAPI(void) cvRectangleR( CvArr* img, CvRect r,
+ CvScalar color, int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8),
+ int shift CV_DEFAULT(0));
+
+
+/* Draws a circle with specified center and radius.
+ Thickness works in the same way as with cvRectangle */
+CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius,
+ CvScalar color, int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));
+
+/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector,
+ depending on <thickness>, <start_angle> and <end_angle> parameters. The resultant figure
+ is rotated by <angle>. All the angles are in degrees */
+CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes,
+ double angle, double start_angle, double end_angle,
+ CvScalar color, int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));
+
+/* Draws the ellipse inscribed in the rotated box <box>: the half-axes are
+ half the box width/height (rounded), rotation is box.angle, and a full
+ 0..360 degree sweep is rendered through cvEllipse */
+CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color,
+ int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) )
+{
+ CvSize axes;
+ axes.width = cvRound(box.size.width*0.5);
+ axes.height = cvRound(box.size.height*0.5);
+
+ cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle,
+ 0, 360, color, thickness, line_type, shift );
+}
+
+/* Fills convex or monotonous polygon. */
+CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color,
+ int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0));
+
+/* Fills an area bounded by one or more arbitrary polygons */
+CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts,
+ int contours, CvScalar color,
+ int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );
+
+/* Draws one or more polygonal curves */
+CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours,
+ int is_closed, CvScalar color, int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) );
+
+#define cvDrawRect cvRectangle
+#define cvDrawLine cvLine
+#define cvDrawCircle cvCircle
+#define cvDrawEllipse cvEllipse
+#define cvDrawPolyLine cvPolyLine
+
+/* Clips the line segment connecting *pt1 and *pt2
+ by the rectangular window
+ (0<=x<img_size.width, 0<=y<img_size.height). */
+CVAPI(int) cvClipLine( CvSize img_size, CvPoint* pt1, CvPoint* pt2 );
+
+/* Initializes line iterator. Initially, line_iterator->ptr will point
+ to pt1 (or pt2, see left_to_right description) location in the image.
+ Returns the number of pixels on the line between the ending points. */
+CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2,
+ CvLineIterator* line_iterator,
+ int connectivity CV_DEFAULT(8),
+ int left_to_right CV_DEFAULT(0));
+
+/* Moves iterator to the next line point.
+ Branchless step: _line_iterator_mask is all-ones when err < 0 and zero
+ otherwise, so plus_delta/plus_step contribute only in that case while
+ minus_delta/minus_step are always added */
+#define CV_NEXT_LINE_POINT( line_iterator ) \
+{ \
+ int _line_iterator_mask = (line_iterator).err < 0 ? -1 : 0; \
+ (line_iterator).err += (line_iterator).minus_delta + \
+ ((line_iterator).plus_delta & _line_iterator_mask); \
+ (line_iterator).ptr += (line_iterator).minus_step + \
+ ((line_iterator).plus_step & _line_iterator_mask); \
+}
+
+
+/* basic font types */
+#define CV_FONT_HERSHEY_SIMPLEX 0
+#define CV_FONT_HERSHEY_PLAIN 1
+#define CV_FONT_HERSHEY_DUPLEX 2
+#define CV_FONT_HERSHEY_COMPLEX 3
+#define CV_FONT_HERSHEY_TRIPLEX 4
+#define CV_FONT_HERSHEY_COMPLEX_SMALL 5
+#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6
+#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7
+
+/* font flags */
+#define CV_FONT_ITALIC 16
+
+#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX
+
+
+/* Font structure: attributes consumed by cvPutText/cvGetTextSize.
+ Initialize with cvInitFont (or the cvFont helper) before use */
+typedef struct CvFont
+{
+ const char* nameFont; //Qt:nameFont
+ CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component])
+ int font_face; //Qt: bool italic /* =CV_FONT_* */
+ const int* ascii; /* font data and metrics */
+ const int* greek;
+ const int* cyrillic;
+ float hscale, vscale;
+ float shear; /* slope coefficient: 0 - normal, >0 - italic */
+ int thickness; //Qt: weight /* letters thickness */
+ float dx; /* horizontal interval between letters */
+ int line_type; //Qt: PointSize
+}
+CvFont;
+
+/* Initializes font structure used further in cvPutText */
+CVAPI(void) cvInitFont( CvFont* font, int font_face,
+ double hscale, double vscale,
+ double shear CV_DEFAULT(0),
+ int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8));
+
+/* Convenience constructor: builds a CvFont with the plain Hershey face,
+ equal horizontal/vertical scale, no shear, and antialiased (CV_AA)
+ line type */
+CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) )
+{
+ CvFont font;
+ cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA );
+ return font;
+}
+
+/* Renders text stroke with specified font and color at specified location.
+ CvFont should be initialized with cvInitFont */
+CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org,
+ const CvFont* font, CvScalar color );
+
+/* Calculates bounding box of text stroke (useful for alignment) */
+CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font,
+ CvSize* text_size, int* baseline );
+
+
+
+/* Unpacks color value, if arrtype is CV_8UC?, <color> is treated as
+ packed color value, otherwise the first channels (depending on arrtype)
+ of destination scalar are set to the same value = <color> */
+CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype );
+
+/* Returns the polygon points which make up the given ellipse. The ellipse is define by
+ the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep
+ of the ellipse arc can be done by specifying arc_start and arc_end to be something
+ other than 0 and 360, respectively. The input array 'pts' must be large enough to
+ hold the result. The total number of points stored into 'pts' is returned by this
+ function. */
+CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes,
+ int angle, int arc_start, int arc_end, CvPoint * pts, int delta );
+
+/* Draws contour outlines or filled interiors on the image */
+CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour,
+ CvScalar external_color, CvScalar hole_color,
+ int max_level, int thickness CV_DEFAULT(1),
+ int line_type CV_DEFAULT(8),
+ CvPoint offset CV_DEFAULT(cvPoint(0,0)));
+
+/* Does look-up transformation. Elements of the source array
+ (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */
+CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut );
+
+
+/******************* Iteration through the sequence tree *****************/
+/* State for cvInitTreeNodeIterator/cvNextTreeNode/cvPrevTreeNode */
+typedef struct CvTreeNodeIterator
+{
+ const void* node; /* current node */
+ int level; /* current level */
+ int max_level; /* traversal depth limit (set by cvInitTreeNodeIterator) */
+}
+CvTreeNodeIterator;
+
+CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator,
+ const void* first, int max_level );
+CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator );
+CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator );
+
+/* Inserts sequence into tree with specified "parent" sequence.
+ If parent is equal to frame (e.g. the most external contour),
+ then added contour will have null pointer to parent. */
+CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame );
+
+/* Removes contour from tree (together with the contour children). */
+CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame );
+
+/* Gathers pointers to all the sequences,
+ accessible from the <first>, to the single sequence */
+CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size,
+ CvMemStorage* storage );
+
+/* The function implements the K-means algorithm for clustering an array of sample
+ vectors in a specified number of classes */
+#define CV_KMEANS_USE_INITIAL_LABELS 1
+CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels,
+ CvTermCriteria termcrit, int attempts CV_DEFAULT(1),
+ CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0),
+ CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) );
+
+/****************************************************************************************\
+* System functions *
+\****************************************************************************************/
+
+/* Add the function pointers table with associated information to the IPP primitives list */
+CVAPI(int) cvRegisterModule( const CvModuleInfo* module_info );
+
+/* Loads optimized functions from IPP, MKL etc. or switches back to pure C code */
+CVAPI(int) cvUseOptimized( int on_off );
+
+/* Retrieves information about the registered modules and loaded optimized plugins */
+CVAPI(void) cvGetModuleInfo( const char* module_name,
+ const char** version,
+ const char** loaded_addon_plugins );
+
+typedef void* (CV_CDECL *CvAllocFunc)(size_t size, void* userdata);
+typedef int (CV_CDECL *CvFreeFunc)(void* pptr, void* userdata);
+
+/* Set user-defined memory management functions (substitutes for malloc and free) that
+ will be called by cvAlloc, cvFree and higher-level functions (e.g. cvCreateImage) */
+CVAPI(void) cvSetMemoryManager( CvAllocFunc alloc_func CV_DEFAULT(NULL),
+ CvFreeFunc free_func CV_DEFAULT(NULL),
+ void* userdata CV_DEFAULT(NULL));
+
+
+typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader)
+ (int,int,int,char*,char*,int,int,int,int,int,
+ IplROI*,IplImage*,void*,IplTileInfo*);
+typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int);
+typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int);
+typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int);
+typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*);
+
+/* Makes OpenCV use IPL functions for IplImage allocation/deallocation */
+CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header,
+ Cv_iplAllocateImageData allocate_data,
+ Cv_iplDeallocate deallocate,
+ Cv_iplCreateROI create_roi,
+ Cv_iplCloneImage clone_image );
+
+#define CV_TURN_ON_IPL_COMPATIBILITY() \
+ cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \
+ iplDeallocate, iplCreateROI, iplCloneImage )
+
+/****************************************************************************************\
+* Data Persistence *
+\****************************************************************************************/
+
+/********************************** High-level functions ********************************/
+
+/* opens existing or creates new file storage */
+CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage,
+ int flags, const char* encoding CV_DEFAULT(NULL) );
+
+/* closes file storage and deallocates buffers */
+CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs );
+
+/* returns attribute value or 0 (NULL) if there is no such attribute */
+CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name );
+
+/* starts writing compound structure (map or sequence) */
+CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name,
+ int struct_flags, const char* type_name CV_DEFAULT(NULL),
+ CvAttrList attributes CV_DEFAULT(cvAttrList()));
+
+/* finishes writing compound structure */
+CVAPI(void) cvEndWriteStruct( CvFileStorage* fs );
+
+/* writes an integer */
+CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value );
+
+/* writes a floating-point number */
+CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value );
+
+/* writes a string */
+CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name,
+ const char* str, int quote CV_DEFAULT(0) );
+
+/* writes a comment */
+CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment,
+ int eol_comment );
+
+/* writes instance of a standard type (matrix, image, sequence, graph etc.)
+ or user-defined type */
+CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr,
+ CvAttrList attributes CV_DEFAULT(cvAttrList()));
+
+/* starts the next stream */
+CVAPI(void) cvStartNextStream( CvFileStorage* fs );
+
+/* helper function: writes multiple integer or floating-point numbers */
+CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src,
+ int len, const char* dt );
+
+/* returns the hash entry corresponding to the specified literal key string or 0
+ if there is no such a key in the storage */
+CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name,
+ int len CV_DEFAULT(-1),
+ int create_missing CV_DEFAULT(0));
+
+/* returns file node with the specified key within the specified map
+ (collection of named nodes) */
+CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs,
+ int stream_index CV_DEFAULT(0) );
+
+/* returns file node with the specified key within the specified map
+ (collection of named nodes) */
+CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map,
+ const CvStringHashNode* key,
+ int create_missing CV_DEFAULT(0) );
+
+/* this is a slower version of cvGetFileNode that takes the key as a literal string */
+CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs,
+ const CvFileNode* map,
+ const char* name );
+
+/* Reads an integer from a file node: returns default_value for a NULL node,
+   the stored value for an integer node, the rounded value for a real node,
+   and the sentinel 0x7fffffff (INT_MAX) for any other node type. */
+CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) )
+{
+ return !node ? default_value :
+ CV_NODE_IS_INT(node->tag) ? node->data.i :
+ CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff;
+}
+
+
+/* Looks up <name> inside <map> (via cvGetFileNodeByName) and reads it as an
+   integer; falls back to default_value when the key is absent. */
+CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map,
+ const char* name, int default_value CV_DEFAULT(0) )
+{
+ return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value );
+}
+
+
+/* Reads a double from a file node: returns default_value for a NULL node,
+   promotes an integer node to double, returns the stored value for a real
+   node, and the sentinel 1e300 for any other node type. */
+CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) )
+{
+ return !node ? default_value :
+ CV_NODE_IS_INT(node->tag) ? (double)node->data.i :
+ CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300;
+}
+
+
+/* Looks up <name> inside <map> (via cvGetFileNodeByName) and reads it as a
+   double; falls back to default_value when the key is absent. */
+CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map,
+ const char* name, double default_value CV_DEFAULT(0.) )
+{
+ return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value );
+}
+
+
+/* Reads the string pointer held by a file node: returns default_value for a
+   NULL node and 0 for a node that is not a string. */
+CV_INLINE const char* cvReadString( const CvFileNode* node,
+ const char* default_value CV_DEFAULT(NULL) )
+{
+ return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0;
+}
+
+
+/* Looks up <name> inside <map> (via cvGetFileNodeByName) and reads it as a
+   string; falls back to default_value when the key is absent. */
+CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map,
+ const char* name, const char* default_value CV_DEFAULT(NULL) )
+{
+ return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value );
+}
+
+
+/* decodes standard or user-defined object and returns it */
+CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node,
+ CvAttrList* attributes CV_DEFAULT(NULL));
+
+/* decodes standard or user-defined object and returns it */
+CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map,
+ const char* name, CvAttrList* attributes CV_DEFAULT(NULL) )
+{
+ return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes );
+}
+
+
+/* starts reading data from sequence or scalar numeric node */
+CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src,
+ CvSeqReader* reader );
+
+/* reads multiple numbers and stores them to array */
+CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader,
+ int count, void* dst, const char* dt );
+
+/* combination of two previous functions for easier reading of whole sequences */
+CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src,
+ void* dst, const char* dt );
+
+/* writes a copy of file node to file storage */
+CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name,
+ const CvFileNode* node, int embed );
+
+/* returns name of file node */
+CVAPI(const char*) cvGetFileNodeName( const CvFileNode* node );
+
+/*********************************** Adding own types ***********************************/
+
+CVAPI(void) cvRegisterType( const CvTypeInfo* info );
+CVAPI(void) cvUnregisterType( const char* type_name );
+CVAPI(CvTypeInfo*) cvFirstType(void);
+CVAPI(CvTypeInfo*) cvFindType( const char* type_name );
+CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr );
+
+/* universal functions */
+CVAPI(void) cvRelease( void** struct_ptr );
+CVAPI(void*) cvClone( const void* struct_ptr );
+
+/* simple API for reading/writing data */
+CVAPI(void) cvSave( const char* filename, const void* struct_ptr,
+ const char* name CV_DEFAULT(NULL),
+ const char* comment CV_DEFAULT(NULL),
+ CvAttrList attributes CV_DEFAULT(cvAttrList()));
+CVAPI(void*) cvLoad( const char* filename,
+ CvMemStorage* memstorage CV_DEFAULT(NULL),
+ const char* name CV_DEFAULT(NULL),
+ const char** real_name CV_DEFAULT(NULL) );
+
+/*********************************** Measuring Execution Time ***************************/
+
+/* helper functions for RNG initialization and accurate time measurement:
+ uses internal clock counter on x86 */
+CVAPI(int64) cvGetTickCount( void );
+CVAPI(double) cvGetTickFrequency( void );
+
+/*********************************** CPU capabilities ***********************************/
+
+#define CV_CPU_NONE 0
+#define CV_CPU_MMX 1
+#define CV_CPU_SSE 2
+#define CV_CPU_SSE2 3
+#define CV_CPU_SSE3 4
+#define CV_CPU_SSSE3 5
+#define CV_CPU_SSE4_1 6
+#define CV_CPU_SSE4_2 7
+#define CV_CPU_POPCNT 8
+#define CV_CPU_AVX 10
+#define CV_CPU_AVX2 11
+#define CV_HARDWARE_MAX_FEATURE 255
+
+CVAPI(int) cvCheckHardwareSupport(int feature);
+
+/*********************************** Multi-Threading ************************************/
+
+/* retrieve/set the number of threads used in OpenMP implementations */
+CVAPI(int) cvGetNumThreads( void );
+CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) );
+/* get index of the thread being executed */
+CVAPI(int) cvGetThreadNum( void );
+
+
+/********************************** Error Handling **************************************/
+
+/* Get current OpenCV error status */
+CVAPI(int) cvGetErrStatus( void );
+
+/* Sets error status silently */
+CVAPI(void) cvSetErrStatus( int status );
+
+#define CV_ErrModeLeaf 0 /* Print error and exit program */
+#define CV_ErrModeParent 1 /* Print error and continue */
+#define CV_ErrModeSilent 2 /* Don't print and continue */
+
+/* Retrieves the current error processing mode */
+CVAPI(int) cvGetErrMode( void );
+
+/* Sets error processing mode, returns previously used mode */
+CVAPI(int) cvSetErrMode( int mode );
+
+/* Sets error status and performs some additional actions (displaying a message box,
+ writing a message to stderr, terminating the application etc.)
+ depending on the current error mode */
+CVAPI(void) cvError( int status, const char* func_name,
+ const char* err_msg, const char* file_name, int line );
+
+/* Retrieves textual description of the error given its code */
+CVAPI(const char*) cvErrorStr( int status );
+
+/* Retrieves detailed information about the last error that occurred */
+CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description,
+ const char** filename, int* line );
+
+/* Maps IPP error codes to the counterparts from OpenCV */
+CVAPI(int) cvErrorFromIppStatus( int ipp_status );
+
+typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name,
+ const char* err_msg, const char* file_name, int line, void* userdata );
+
+/* Assigns a new error-handling function */
+CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler,
+ void* userdata CV_DEFAULT(NULL),
+ void** prev_userdata CV_DEFAULT(NULL) );
+
+/*
+ Output to:
+ cvNulDevReport - nothing
+ cvStdErrReport - console(fprintf(stderr,...))
+ cvGuiBoxReport - MessageBox(WIN32)
+ */
+CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg,
+ const char* file_name, int line, void* userdata );
+
+CVAPI(int) cvStdErrReport( int status, const char* func_name, const char* err_msg,
+ const char* file_name, int line, void* userdata );
+
+CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg,
+ const char* file_name, int line, void* userdata );
+
+#define OPENCV_ERROR(status,func,context) \
+cvError((status),(func),(context),__FILE__,__LINE__)
+
+#define OPENCV_ERRCHK(func,context) \
+{if (cvGetErrStatus() >= 0) \
+{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}}
+
+#define OPENCV_ASSERT(expr,func,context) \
+{if (! (expr)) \
+{OPENCV_ERROR(CV_StsInternal,(func),(context));}}
+
+#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk))
+
+#define OPENCV_CALL( Func ) \
+{ \
+Func; \
+}
+
+
+/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */
+#ifdef CV_NO_FUNC_NAMES
+#define CV_FUNCNAME( Name )
+#define cvFuncName ""
+#else
+#define CV_FUNCNAME( Name ) \
+static char cvFuncName[] = Name
+#endif
+
+
+/*
+ CV_ERROR macro unconditionally raises error with passed code and message.
+ After raising error, control will be transferred to the exit label.
+ */
+#define CV_ERROR( Code, Msg ) \
+{ \
+ cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \
+ __CV_EXIT__; \
+}
+
+/* Simplified form of CV_ERROR */
+#define CV_ERROR_FROM_CODE( code ) \
+ CV_ERROR( code, "" )
+
+/*
+ CV_CHECK macro checks error status after CV (or IPL)
+ function call. If error detected, control will be transferred to the exit
+ label.
+ */
+#define CV_CHECK() \
+{ \
+ if( cvGetErrStatus() < 0 ) \
+ CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \
+}
+
+
+/*
+ CV_CALL macro calls a CV (or IPL) function, checks the error status and
+ signals an error if the function failed. Useful in "parent node"
+ error processing mode
+ */
+#define CV_CALL( Func ) \
+{ \
+ Func; \
+ CV_CHECK(); \
+}
+
+
+/* Runtime assertion macro */
+#define CV_ASSERT( Condition ) \
+{ \
+ if( !(Condition) ) \
+ CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \
+}
+
+#define __CV_BEGIN__ {
+#define __CV_END__ goto exit; exit: ; }
+#define __CV_EXIT__ goto exit
+
+#ifdef __cplusplus
+}
+
+// classes for automatic module/RTTI data registration/unregistration
+struct CV_EXPORTS CvModule
+{
+ CvModule( CvModuleInfo* _info );
+ ~CvModule();
+ CvModuleInfo* info;
+
+ static CvModuleInfo* first;
+ static CvModuleInfo* last;
+};
+
+struct CV_EXPORTS CvType
+{
+ CvType( const char* type_name,
+ CvIsInstanceFunc is_instance, CvReleaseFunc release=0,
+ CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 );
+ ~CvType();
+ CvTypeInfo* info;
+
+ static CvTypeInfo* first;
+ static CvTypeInfo* last;
+};
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp
new file mode 100644
index 00000000..15340455
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/cuda_devptrs.hpp
@@ -0,0 +1,199 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_DEVPTRS_HPP__
+#define __OPENCV_CORE_DEVPTRS_HPP__
+
+#ifdef __cplusplus
+
+#ifdef __CUDACC__
+ #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
+#else
+ #define __CV_GPU_HOST_DEVICE__
+#endif
+
+namespace cv
+{
+ namespace gpu
+ {
+ // Simple lightweight structures that encapsulates information about an image on device.
+ // It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile
+
+ template <bool expr> struct StaticAssert;
+ template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
+
+ // Thin typed wrapper over a raw pointer, convertible to/from T*.
+ // All members are __CV_GPU_HOST_DEVICE__, i.e. usable from both host
+ // and CUDA device code when compiled with nvcc.
+ template<typename T> struct DevPtr
+ {
+ typedef T elem_type;
+ typedef int index_type;
+
+ enum { elem_size = sizeof(elem_type) };
+
+ T* data;
+
+ __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
+ __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
+
+ __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
+ __CV_GPU_HOST_DEVICE__ operator T*() { return data; }
+ __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
+ };
+
+ // DevPtr paired with a buffer size (1D view of device memory).
+ template<typename T> struct PtrSz : public DevPtr<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
+
+ size_t size;
+ };
+
+ // DevPtr plus a row stride: row access via ptr(y) and 2D element
+ // access via operator()(y, x). The stride is always in bytes.
+ template<typename T> struct PtrStep : public DevPtr<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
+
+ /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */
+ size_t step;
+
+ __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
+ __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
+
+ __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
+ __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+ };
+
+ // PtrStep plus the 2D extent (rows, cols). The templated constructor
+ // reinterprets another element type in place (cast, no copy).
+ template <typename T> struct PtrStepSz : public PtrStep<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
+ : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
+
+ template <typename U>
+ explicit PtrStepSz(const PtrStepSz<U>& d) : PtrStep<T>((T*)d.data, d.step), cols(d.cols), rows(d.rows){}
+
+ int cols;
+ int rows;
+ };
+
+ typedef PtrStepSz<unsigned char> PtrStepSzb;
+ typedef PtrStepSz<float> PtrStepSzf;
+ typedef PtrStepSz<int> PtrStepSzi;
+
+ typedef PtrStep<unsigned char> PtrStepb;
+ typedef PtrStep<float> PtrStepf;
+ typedef PtrStep<int> PtrStepi;
+
+
+#if defined __GNUC__
+ #define __CV_GPU_DEPR_BEFORE__
+ #define __CV_GPU_DEPR_AFTER__ __attribute__ ((deprecated))
+#elif defined(__MSVC__) //|| defined(__CUDACC__)
+ #pragma deprecated(DevMem2D_)
+ #define __CV_GPU_DEPR_BEFORE__ __declspec(deprecated)
+ #define __CV_GPU_DEPR_AFTER__
+#else
+ #define __CV_GPU_DEPR_BEFORE__
+ #define __CV_GPU_DEPR_AFTER__
+#endif
+
+ // Deprecated forwarding wrapper around PtrStepSz (see the
+ // __CV_GPU_DEPR_* macros above); kept only for source compatibility.
+ template <typename T> struct __CV_GPU_DEPR_BEFORE__ DevMem2D_ : public PtrStepSz<T>
+ {
+ DevMem2D_() {}
+ DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
+
+ template <typename U>
+ explicit __CV_GPU_DEPR_BEFORE__ DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
+ } __CV_GPU_DEPR_AFTER__ ;
+
+ typedef DevMem2D_<unsigned char> DevMem2Db;
+ typedef DevMem2Db DevMem2D;
+ typedef DevMem2D_<float> DevMem2Df;
+ typedef DevMem2D_<int> DevMem2Di;
+
+ // Variant of PtrStep whose step is measured in ELEMENTS, not bytes:
+ // the constructor divides the byte step by elem_size, and the static
+ // assert requires sizeof(T) to divide 256 so the division is exact
+ // for 256-byte-aligned pitches.
+ template<typename T> struct PtrElemStep_ : public PtrStep<T>
+ {
+ PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step)
+ {
+ StaticAssert<256 % sizeof(T) == 0>::check();
+
+ PtrStep<T>::step /= PtrStep<T>::elem_size;
+ }
+ __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }
+ __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; }
+
+ __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
+ __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+ };
+
+ // PtrStep constructible directly from a DevMem2D_ (step stays in bytes).
+ template<typename T> struct PtrStep_ : public PtrStep<T>
+ {
+ PtrStep_() {}
+ PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {}
+ };
+
+ typedef PtrElemStep_<unsigned char> PtrElemStep;
+ typedef PtrElemStep_<float> PtrElemStepf;
+ typedef PtrElemStep_<int> PtrElemStepi;
+
+//#undef __CV_GPU_DEPR_BEFORE__
+//#undef __CV_GPU_DEPR_AFTER__
+
+ namespace device
+ {
+ using cv::gpu::PtrSz;
+ using cv::gpu::PtrStep;
+ using cv::gpu::PtrStepSz;
+
+ using cv::gpu::PtrStepSzb;
+ using cv::gpu::PtrStepSzf;
+ using cv::gpu::PtrStepSzi;
+
+ using cv::gpu::PtrStepb;
+ using cv::gpu::PtrStepf;
+ using cv::gpu::PtrStepi;
+ }
+ }
+}
+
+#endif // __cplusplus
+
+#endif /* __OPENCV_CORE_DEVPTRS_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp
new file mode 100644
index 00000000..18dfcd8a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/devmem2d.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/core/cuda_devptrs.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp
new file mode 100644
index 00000000..a7b237f9
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/eigen.hpp
@@ -0,0 +1,280 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_EIGEN_HPP__
+#define __OPENCV_CORE_EIGEN_HPP__
+
+#ifdef __cplusplus
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/core/core.hpp"
+
+#if defined _MSC_VER && _MSC_VER >= 1200
+#pragma warning( disable: 4714 ) //__forceinline is not inlined
+#pragma warning( disable: 4127 ) //conditional expression is constant
+#pragma warning( disable: 4244 ) //conversion from '__int64' to 'int', possible loss of data
+#endif
+
+namespace cv
+{
+
+/* Copies an Eigen matrix into a cv::Mat. A column-major source is wrapped
+   as a Mat header with swapped dimensions and transposed so dst ends up
+   row-major; a row-major source is copied directly. */
+template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols>
+void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst )
+{
+ if( !(src.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _src(src.cols(), src.rows(), DataType<_Tp>::type,
+ (void*)src.data(), src.stride()*sizeof(_Tp));
+ transpose(_src, dst);
+ }
+ else
+ {
+ Mat _src(src.rows(), src.cols(), DataType<_Tp>::type,
+ (void*)src.data(), src.stride()*sizeof(_Tp));
+ _src.copyTo(dst);
+ }
+}
+
+/* Copies a cv::Mat into a fixed-size Eigen matrix, writing directly into
+   dst's storage (wrapped as a Mat header). Transposes and/or converts the
+   element type as needed when dst is column-major; the debug asserts check
+   that the size matched and no reallocation occurred. */
+template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols>
+void cv2eigen( const Mat& src,
+ Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )
+{
+ CV_DbgAssert(src.rows == _rows && src.cols == _cols);
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ if( src.type() == _dst.type() )
+ transpose(src, _dst);
+ else if( src.cols == src.rows )
+ {
+ src.convertTo(_dst, _dst.type());
+ transpose(_dst, _dst);
+ }
+ else
+ Mat(src.t()).convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ src.convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+// Matx case: same as above for a fixed-size cv::Matx source (no element
+// type conversion is needed, so only the storage-order transpose applies).
+template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols>
+void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
+ Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )
+{
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(_cols, _rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(_rows, _cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ Mat(src).copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+/* Copies a cv::Mat into a dynamic-size Eigen matrix: dst is resized to
+   match src first, then the copy proceeds as in the fixed-size overload
+   (transpose for column-major storage, type conversion as needed). */
+template<typename _Tp>
+void cv2eigen( const Mat& src,
+ Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )
+{
+ dst.resize(src.rows, src.cols);
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ if( src.type() == _dst.type() )
+ transpose(src, _dst);
+ else if( src.cols == src.rows )
+ {
+ src.convertTo(_dst, _dst.type());
+ transpose(_dst, _dst);
+ }
+ else
+ Mat(src.t()).convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ src.convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+// Matx case: dynamic-size Eigen destination, resized to _rows x _cols
+// before the copy; only the storage-order transpose applies.
+template<typename _Tp, int _rows, int _cols>
+void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
+ Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )
+{
+ dst.resize(_rows, _cols);
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(_cols, _rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(_rows, _cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ Mat(src).copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+/* Column-vector specialization: src must have exactly one column
+   (enforced by CV_Assert); dst is resized to src.rows. */
+template<typename _Tp>
+void cv2eigen( const Mat& src,
+ Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )
+{
+ CV_Assert(src.cols == 1);
+ dst.resize(src.rows);
+
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ if( src.type() == _dst.type() )
+ transpose(src, _dst);
+ else
+ Mat(src.t()).convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ src.convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+// Matx case: fixed-length column-vector source; dst is resized to _rows.
+template<typename _Tp, int _rows>
+void cv2eigen( const Matx<_Tp, _rows, 1>& src,
+ Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )
+{
+ dst.resize(_rows);
+
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(1, _rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(_rows, 1, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ src.copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+
+/* Row-vector specialization: src must have exactly one row
+   (enforced by CV_Assert); dst is resized to src.cols. */
+template<typename _Tp>
+void cv2eigen( const Mat& src,
+ Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )
+{
+ CV_Assert(src.rows == 1);
+ dst.resize(src.cols);
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ if( src.type() == _dst.type() )
+ transpose(src, _dst);
+ else
+ Mat(src.t()).convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(src.rows, src.cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ src.convertTo(_dst, _dst.type());
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+// Matx case: fixed-length row-vector source; dst is resized to _cols.
+template<typename _Tp, int _cols>
+void cv2eigen( const Matx<_Tp, 1, _cols>& src,
+ Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )
+{
+ dst.resize(_cols);
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(_cols, 1, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(1, _cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ Mat(src).copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+
+}
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp
new file mode 100644
index 00000000..68647d9b
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/gpumat.hpp
@@ -0,0 +1,564 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPUMAT_HPP__
+#define __OPENCV_GPUMAT_HPP__
+
+#ifdef __cplusplus
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/cuda_devptrs.hpp"
+
+namespace cv { namespace gpu
+{
+ //////////////////////////////// Initialization & Info ////////////////////////
+
+ //! This is the only function that does not throw exceptions if the library is compiled without Cuda.
+ CV_EXPORTS int getCudaEnabledDeviceCount();
+
+ //! Functions below throw cv::Exception if the library is compiled without Cuda.
+
+ CV_EXPORTS void setDevice(int device);
+ CV_EXPORTS int getDevice();
+
+ //! Explicitly destroys and cleans up all resources associated with the current device in the current process.
+ //! Any subsequent API call to this device will reinitialize the device.
+ CV_EXPORTS void resetDevice();
+
+ enum FeatureSet
+ {
+ FEATURE_SET_COMPUTE_10 = 10,
+ FEATURE_SET_COMPUTE_11 = 11,
+ FEATURE_SET_COMPUTE_12 = 12,
+ FEATURE_SET_COMPUTE_13 = 13,
+ FEATURE_SET_COMPUTE_20 = 20,
+ FEATURE_SET_COMPUTE_21 = 21,
+ FEATURE_SET_COMPUTE_30 = 30,
+ FEATURE_SET_COMPUTE_35 = 35,
+
+ GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
+ SHARED_ATOMICS = FEATURE_SET_COMPUTE_12,
+ NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13,
+ WARP_SHUFFLE_FUNCTIONS = FEATURE_SET_COMPUTE_30,
+ DYNAMIC_PARALLELISM = FEATURE_SET_COMPUTE_35
+ };
+
+ // Checks whether current device supports the given feature
+ CV_EXPORTS bool deviceSupports(FeatureSet feature_set);
+
+ // Gives information about what GPU archs this OpenCV GPU module was
+ // compiled for
+ class CV_EXPORTS TargetArchs
+ {
+ public:
+ static bool builtWith(FeatureSet feature_set);
+ static bool has(int major, int minor);
+ static bool hasPtx(int major, int minor);
+ static bool hasBin(int major, int minor);
+ static bool hasEqualOrLessPtx(int major, int minor);
+ static bool hasEqualOrGreater(int major, int minor);
+ static bool hasEqualOrGreaterPtx(int major, int minor);
+ static bool hasEqualOrGreaterBin(int major, int minor);
+ private:
+ TargetArchs();
+ };
+
+ // Gives information about the given GPU
+ class CV_EXPORTS DeviceInfo
+ {
+ public:
+ // Creates DeviceInfo object for the current GPU
+ DeviceInfo() : device_id_(getDevice()) { query(); }
+
+ // Creates DeviceInfo object for the given GPU
+ DeviceInfo(int device_id) : device_id_(device_id) { query(); }
+
+ std::string name() const { return name_; }
+
+ // Return compute capability versions
+ int majorVersion() const { return majorVersion_; }
+ int minorVersion() const { return minorVersion_; }
+
+ int multiProcessorCount() const { return multi_processor_count_; }
+
+ size_t sharedMemPerBlock() const;
+
+ void queryMemory(size_t& totalMemory, size_t& freeMemory) const;
+ size_t freeMemory() const;
+ size_t totalMemory() const;
+
+ // Checks whether device supports the given feature
+ bool supports(FeatureSet feature_set) const;
+
+ // Checks whether the GPU module can be run on the given device
+ bool isCompatible() const;
+
+ int deviceID() const { return device_id_; }
+
+ private:
+ void query();
+
+ int device_id_;
+
+ std::string name_;
+ int multi_processor_count_;
+ int majorVersion_;
+ int minorVersion_;
+ };
+
+ CV_EXPORTS void printCudaDeviceInfo(int device);
+ CV_EXPORTS void printShortCudaDeviceInfo(int device);
+
+ //////////////////////////////// GpuMat ///////////////////////////////
+
+ //! Smart pointer for GPU memory with reference counting. Its interface is mostly similar with cv::Mat.
+ class CV_EXPORTS GpuMat
+ {
+ public:
+ //! default constructor
+ GpuMat();
+
+ //! constructs GpuMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
+ GpuMat(int rows, int cols, int type);
+ GpuMat(Size size, int type);
+
+ //! constructs GpuMatrix and fills it with the specified value _s.
+ GpuMat(int rows, int cols, int type, Scalar s);
+ GpuMat(Size size, int type, Scalar s);
+
+ //! copy constructor
+ GpuMat(const GpuMat& m);
+
+ //! constructor for GpuMatrix headers pointing to user-allocated data
+ GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
+ GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);
+
+ //! creates a matrix header for a part of the bigger matrix
+ GpuMat(const GpuMat& m, Range rowRange, Range colRange);
+ GpuMat(const GpuMat& m, Rect roi);
+
+ //! builds GpuMat from Mat. Performs blocking upload to device.
+ explicit GpuMat(const Mat& m);
+
+ //! destructor - calls release()
+ ~GpuMat();
+
+ //! assignment operators
+ GpuMat& operator = (const GpuMat& m);
+
+ //! performs blocking upload of data to GpuMat.
+ void upload(const Mat& m);
+
+ //! downloads data from device to host memory. Blocking calls.
+ void download(Mat& m) const;
+
+ //! returns a new GpuMatrix header for the specified row
+ GpuMat row(int y) const;
+ //! returns a new GpuMatrix header for the specified column
+ GpuMat col(int x) const;
+ //! ... for the specified row span
+ GpuMat rowRange(int startrow, int endrow) const;
+ GpuMat rowRange(Range r) const;
+ //! ... for the specified column span
+ GpuMat colRange(int startcol, int endcol) const;
+ GpuMat colRange(Range r) const;
+
+ //! returns deep copy of the GpuMatrix, i.e. the data is copied
+ GpuMat clone() const;
+ //! copies the GpuMatrix content to "m".
+ // It calls m.create(this->size(), this->type()).
+ void copyTo(GpuMat& m) const;
+ //! copies those GpuMatrix elements to "m" that are marked with non-zero mask elements.
+ void copyTo(GpuMat& m, const GpuMat& mask) const;
+ //! converts GpuMatrix to another datatype with optional scaling. See cvConvertScale.
+ void convertTo(GpuMat& m, int rtype, double alpha = 1, double beta = 0) const;
+
+ void assignTo(GpuMat& m, int type=-1) const;
+
+ //! sets every GpuMatrix element to s
+ GpuMat& operator = (Scalar s);
+ //! sets some of the GpuMatrix elements to s, according to the mask
+ GpuMat& setTo(Scalar s, const GpuMat& mask = GpuMat());
+ //! creates alternative GpuMatrix header for the same data, with different
+ // number of channels and/or different number of rows. see cvReshape.
+ GpuMat reshape(int cn, int rows = 0) const;
+
+ //! allocates new GpuMatrix data unless the GpuMatrix already has specified size and type.
+ // previous data is unreferenced if needed.
+ void create(int rows, int cols, int type);
+ void create(Size size, int type);
+ //! decreases reference counter;
+ // deallocate the data when reference counter reaches 0.
+ void release();
+
+ //! swaps with other smart pointer
+ void swap(GpuMat& mat);
+
+ //! locates GpuMatrix header within a parent GpuMatrix. See below
+ void locateROI(Size& wholeSize, Point& ofs) const;
+ //! moves/resizes the current GpuMatrix ROI inside the parent GpuMatrix.
+ GpuMat& adjustROI(int dtop, int dbottom, int dleft, int dright);
+ //! extracts a rectangular sub-GpuMatrix
+ // (this is a generalized form of row, rowRange etc.)
+ GpuMat operator()(Range rowRange, Range colRange) const;
+ GpuMat operator()(Rect roi) const;
+
+ //! returns true iff the GpuMatrix data is continuous
+ // (i.e. when there are no gaps between successive rows).
+ // similar to CV_IS_GpuMat_CONT(cvGpuMat->type)
+ bool isContinuous() const;
+ //! returns element size in bytes,
+ // similar to CV_ELEM_SIZE(cvMat->type)
+ size_t elemSize() const;
+ //! returns the size of element channel in bytes.
+ size_t elemSize1() const;
+ //! returns element type, similar to CV_MAT_TYPE(cvMat->type)
+ int type() const;
+ //! returns element type, similar to CV_MAT_DEPTH(cvMat->type)
+ int depth() const;
+ //! returns element type, similar to CV_MAT_CN(cvMat->type)
+ int channels() const;
+ //! returns step/elemSize1()
+ size_t step1() const;
+ //! returns GpuMatrix size:
+ // width == number of columns, height == number of rows
+ Size size() const;
+ //! returns true if GpuMatrix data is NULL
+ bool empty() const;
+
+ //! returns pointer to y-th row
+ uchar* ptr(int y = 0);
+ const uchar* ptr(int y = 0) const;
+
+ //! template version of the above method
+ template<typename _Tp> _Tp* ptr(int y = 0);
+ template<typename _Tp> const _Tp* ptr(int y = 0) const;
+
+ template <typename _Tp> operator PtrStepSz<_Tp>() const;
+ template <typename _Tp> operator PtrStep<_Tp>() const;
+
+ // Deprecated function
+ __CV_GPU_DEPR_BEFORE__ template <typename _Tp> operator DevMem2D_<_Tp>() const __CV_GPU_DEPR_AFTER__;
+ __CV_GPU_DEPR_BEFORE__ template <typename _Tp> operator PtrStep_<_Tp>() const __CV_GPU_DEPR_AFTER__;
+ #undef __CV_GPU_DEPR_BEFORE__
+ #undef __CV_GPU_DEPR_AFTER__
+
+ /*! includes several bit-fields:
+ - the magic signature
+ - continuity flag
+ - depth
+ - number of channels
+ */
+ int flags;
+
+ //! the number of rows and columns
+ int rows, cols;
+
+ //! a distance between successive rows in bytes; includes the gap if any
+ size_t step;
+
+ //! pointer to the data
+ uchar* data;
+
+ //! pointer to the reference counter;
+ // when GpuMatrix points to user-allocated data, the pointer is NULL
+ int* refcount;
+
+ //! helper fields used in locateROI and adjustROI
+ uchar* datastart;
+ uchar* dataend;
+ };
+
+ //! Creates continuous GPU matrix
+ CV_EXPORTS void createContinuous(int rows, int cols, int type, GpuMat& m);
+ CV_EXPORTS GpuMat createContinuous(int rows, int cols, int type);
+ CV_EXPORTS void createContinuous(Size size, int type, GpuMat& m);
+ CV_EXPORTS GpuMat createContinuous(Size size, int type);
+
+ //! Ensures that size of the given matrix is not less than (rows, cols) size
+ //! and the matrix type matches the specified one too
+ CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m);
+ CV_EXPORTS void ensureSizeIsEnough(Size size, int type, GpuMat& m);
+
+ CV_EXPORTS GpuMat allocMatFromBuf(int rows, int cols, int type, GpuMat &mat);
+
+ ////////////////////////////////////////////////////////////////////////
+ // Error handling
+
+ CV_EXPORTS void error(const char* error_string, const char* file, const int line, const char* func = "");
+
+ ////////////////////////////////////////////////////////////////////////
+ ////////////////////////////////////////////////////////////////////////
+ ////////////////////////////////////////////////////////////////////////
+
+ inline GpuMat::GpuMat()
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+ {
+ }
+
+ inline GpuMat::GpuMat(int rows_, int cols_, int type_)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+ {
+ if (rows_ > 0 && cols_ > 0)
+ create(rows_, cols_, type_);
+ }
+
+ inline GpuMat::GpuMat(Size size_, int type_)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+ {
+ if (size_.height > 0 && size_.width > 0)
+ create(size_.height, size_.width, type_);
+ }
+
+ inline GpuMat::GpuMat(int rows_, int cols_, int type_, Scalar s_)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+ {
+ if (rows_ > 0 && cols_ > 0)
+ {
+ create(rows_, cols_, type_);
+ setTo(s_);
+ }
+ }
+
+ inline GpuMat::GpuMat(Size size_, int type_, Scalar s_)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0)
+ {
+ if (size_.height > 0 && size_.width > 0)
+ {
+ create(size_.height, size_.width, type_);
+ setTo(s_);
+ }
+ }
+
+ inline GpuMat::~GpuMat()
+ {
+ release();
+ }
+
+ inline GpuMat GpuMat::clone() const
+ {
+ GpuMat m;
+ copyTo(m);
+ return m;
+ }
+
+ inline void GpuMat::assignTo(GpuMat& m, int _type) const
+ {
+ if (_type < 0)
+ m = *this;
+ else
+ convertTo(m, _type);
+ }
+
+ inline size_t GpuMat::step1() const
+ {
+ return step / elemSize1();
+ }
+
+ inline bool GpuMat::empty() const
+ {
+ return data == 0;
+ }
+
+ template<typename _Tp> inline _Tp* GpuMat::ptr(int y)
+ {
+ return (_Tp*)ptr(y);
+ }
+
+ template<typename _Tp> inline const _Tp* GpuMat::ptr(int y) const
+ {
+ return (const _Tp*)ptr(y);
+ }
+
+ inline void swap(GpuMat& a, GpuMat& b)
+ {
+ a.swap(b);
+ }
+
+ inline GpuMat GpuMat::row(int y) const
+ {
+ return GpuMat(*this, Range(y, y+1), Range::all());
+ }
+
+ inline GpuMat GpuMat::col(int x) const
+ {
+ return GpuMat(*this, Range::all(), Range(x, x+1));
+ }
+
+ inline GpuMat GpuMat::rowRange(int startrow, int endrow) const
+ {
+ return GpuMat(*this, Range(startrow, endrow), Range::all());
+ }
+
+ inline GpuMat GpuMat::rowRange(Range r) const
+ {
+ return GpuMat(*this, r, Range::all());
+ }
+
+ inline GpuMat GpuMat::colRange(int startcol, int endcol) const
+ {
+ return GpuMat(*this, Range::all(), Range(startcol, endcol));
+ }
+
+ inline GpuMat GpuMat::colRange(Range r) const
+ {
+ return GpuMat(*this, Range::all(), r);
+ }
+
+ inline void GpuMat::create(Size size_, int type_)
+ {
+ create(size_.height, size_.width, type_);
+ }
+
+ inline GpuMat GpuMat::operator()(Range _rowRange, Range _colRange) const
+ {
+ return GpuMat(*this, _rowRange, _colRange);
+ }
+
+ inline GpuMat GpuMat::operator()(Rect roi) const
+ {
+ return GpuMat(*this, roi);
+ }
+
+ inline bool GpuMat::isContinuous() const
+ {
+ return (flags & Mat::CONTINUOUS_FLAG) != 0;
+ }
+
+ inline size_t GpuMat::elemSize() const
+ {
+ return CV_ELEM_SIZE(flags);
+ }
+
+ inline size_t GpuMat::elemSize1() const
+ {
+ return CV_ELEM_SIZE1(flags);
+ }
+
+ inline int GpuMat::type() const
+ {
+ return CV_MAT_TYPE(flags);
+ }
+
+ inline int GpuMat::depth() const
+ {
+ return CV_MAT_DEPTH(flags);
+ }
+
+ inline int GpuMat::channels() const
+ {
+ return CV_MAT_CN(flags);
+ }
+
+ inline Size GpuMat::size() const
+ {
+ return Size(cols, rows);
+ }
+
+ inline uchar* GpuMat::ptr(int y)
+ {
+ CV_DbgAssert((unsigned)y < (unsigned)rows);
+ return data + step * y;
+ }
+
+ inline const uchar* GpuMat::ptr(int y) const
+ {
+ CV_DbgAssert((unsigned)y < (unsigned)rows);
+ return data + step * y;
+ }
+
+ inline GpuMat& GpuMat::operator = (Scalar s)
+ {
+ setTo(s);
+ return *this;
+ }
+
+ /** @cond IGNORED */
+ template <class T> inline GpuMat::operator PtrStepSz<T>() const
+ {
+ return PtrStepSz<T>(rows, cols, (T*)data, step);
+ }
+
+ template <class T> inline GpuMat::operator PtrStep<T>() const
+ {
+ return PtrStep<T>((T*)data, step);
+ }
+
+ template <class T> inline GpuMat::operator DevMem2D_<T>() const
+ {
+ return DevMem2D_<T>(rows, cols, (T*)data, step);
+ }
+
+ template <class T> inline GpuMat::operator PtrStep_<T>() const
+ {
+ return PtrStep_<T>(static_cast< DevMem2D_<T> >(*this));
+ }
+ /** @endcond */
+
+ inline GpuMat createContinuous(int rows, int cols, int type)
+ {
+ GpuMat m;
+ createContinuous(rows, cols, type, m);
+ return m;
+ }
+
+ inline void createContinuous(Size size, int type, GpuMat& m)
+ {
+ createContinuous(size.height, size.width, type, m);
+ }
+
+ inline GpuMat createContinuous(Size size, int type)
+ {
+ GpuMat m;
+ createContinuous(size, type, m);
+ return m;
+ }
+
+ inline void ensureSizeIsEnough(Size size, int type, GpuMat& m)
+ {
+ ensureSizeIsEnough(size.height, size.width, type, m);
+ }
+}}
+
+#endif // __cplusplus
+
+#endif // __OPENCV_GPUMAT_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp
new file mode 100644
index 00000000..c2c89613
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/internal.hpp
@@ -0,0 +1,795 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+/* The header is for internal use and it is likely to change.
+ It contains some macro definitions that are used in cxcore, cv, cvaux
+ and, probably, other libraries. If you need some of this functionality,
+ the safe way is to copy it into your code and rename the macros.
+*/
+#ifndef __OPENCV_CORE_INTERNAL_HPP__
+#define __OPENCV_CORE_INTERNAL_HPP__
+
+#include <vector>
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/types_c.h"
+
+#if defined WIN32 || defined _WIN32
+# ifndef WIN32
+# define WIN32
+# endif
+# ifndef _WIN32
+# define _WIN32
+# endif
+#endif
+
+#if !defined WIN32 && !defined WINCE
+# include <pthread.h>
+#endif
+
+#ifdef __BORLANDC__
+# ifndef WIN32
+# define WIN32
+# endif
+# ifndef _WIN32
+# define _WIN32
+# endif
+# define CV_DLL
+# undef _CV_ALWAYS_PROFILE_
+# define _CV_ALWAYS_NO_PROFILE_
+#endif
+
+#ifndef FALSE
+# define FALSE 0
+#endif
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+#define __BEGIN__ __CV_BEGIN__
+#define __END__ __CV_END__
+#define EXIT __CV_EXIT__
+
+#ifdef HAVE_IPP
+# include "ipp.h"
+
+CV_INLINE IppiSize ippiSize(int width, int height)
+{
+ IppiSize size = { width, height };
+ return size;
+}
+
+CV_INLINE IppiSize ippiSize(const cv::Size & _size)
+{
+ IppiSize size = { _size.width, _size.height };
+ return size;
+}
+
+#endif
+
+#ifndef IPPI_CALL
+# define IPPI_CALL(func) CV_Assert((func) >= 0)
+#endif
+
+#if defined __SSE2__ || defined _M_X64 || (defined _M_IX86_FP && _M_IX86_FP >= 2)
+# include "emmintrin.h"
+# define CV_SSE 1
+# define CV_SSE2 1
+# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include "pmmintrin.h"
+# define CV_SSE3 1
+# endif
+# if defined __SSSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include "tmmintrin.h"
+# define CV_SSSE3 1
+# endif
+# if defined __SSE4_1__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include <smmintrin.h>
+# define CV_SSE4_1 1
+# endif
+# if defined __SSE4_2__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include <nmmintrin.h>
+# define CV_SSE4_2 1
+# endif
+# if defined __AVX__ || (defined _MSC_FULL_VER && _MSC_FULL_VER >= 160040219)
+// MS Visual Studio 2010 (2012?) has no macro pre-defined to identify the use of /arch:AVX
+// See: http://connect.microsoft.com/VisualStudio/feedback/details/605858/arch-avx-should-define-a-predefined-macro-in-x64-and-set-a-unique-value-for-m-ix86-fp-in-win32
+# include <immintrin.h>
+# define CV_AVX 1
+# if defined(_XCR_XFEATURE_ENABLED_MASK)
+# define __xgetbv() _xgetbv(_XCR_XFEATURE_ENABLED_MASK)
+# else
+# define __xgetbv() 0
+# endif
+# endif
+# if defined __AVX2__
+# include <immintrin.h>
+# define CV_AVX2 1
+# endif
+#endif
+
+
+#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
+# include <Intrin.h>
+# include "arm_neon.h"
+# define CV_NEON 1
+# define CPU_HAS_NEON_FEATURE (true)
+#elif defined(__ARM_NEON__) || defined(__ARM_NEON)
+# include <arm_neon.h>
+# define CV_NEON 1
+# define CPU_HAS_NEON_FEATURE (true)
+#endif
+
+#ifndef CV_SSE
+# define CV_SSE 0
+#endif
+#ifndef CV_SSE2
+# define CV_SSE2 0
+#endif
+#ifndef CV_SSE3
+# define CV_SSE3 0
+#endif
+#ifndef CV_SSSE3
+# define CV_SSSE3 0
+#endif
+#ifndef CV_SSE4_1
+# define CV_SSE4_1 0
+#endif
+#ifndef CV_SSE4_2
+# define CV_SSE4_2 0
+#endif
+#ifndef CV_AVX
+# define CV_AVX 0
+#endif
+#ifndef CV_AVX2
+# define CV_AVX2 0
+#endif
+#ifndef CV_NEON
+# define CV_NEON 0
+#endif
+
+#ifdef HAVE_TBB
+# include "tbb/tbb_stddef.h"
+# if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
+# include "tbb/tbb.h"
+# include "tbb/task.h"
+# undef min
+# undef max
+# else
+# undef HAVE_TBB
+# endif
+#endif
+
+#ifdef HAVE_EIGEN
+# if defined __GNUC__ && defined __APPLE__
+# pragma GCC diagnostic ignored "-Wshadow"
+# endif
+# include <Eigen/Core>
+# include "opencv2/core/eigen.hpp"
+#endif
+
+#ifdef __cplusplus
+
+namespace cv
+{
+#ifdef HAVE_TBB
+
+ typedef tbb::blocked_range<int> BlockedRange;
+
+ template<typename Body> static inline
+ void parallel_for( const BlockedRange& range, const Body& body )
+ {
+ tbb::parallel_for(range, body);
+ }
+
+ template<typename Iterator, typename Body> static inline
+ void parallel_do( Iterator first, Iterator last, const Body& body )
+ {
+ tbb::parallel_do(first, last, body);
+ }
+
+ typedef tbb::split Split;
+
+ template<typename Body> static inline
+ void parallel_reduce( const BlockedRange& range, Body& body )
+ {
+ tbb::parallel_reduce(range, body);
+ }
+
+ typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
+ typedef tbb::concurrent_vector<double> ConcurrentDoubleVector;
+#else
+ class BlockedRange
+ {
+ public:
+ BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
+ BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
+ int begin() const { return _begin; }
+ int end() const { return _end; }
+ int grainsize() const { return _grainsize; }
+
+ protected:
+ int _begin, _end, _grainsize;
+ };
+
+ template<typename Body> static inline
+ void parallel_for( const BlockedRange& range, const Body& body )
+ {
+ body(range);
+ }
+ typedef std::vector<Rect> ConcurrentRectVector;
+ typedef std::vector<double> ConcurrentDoubleVector;
+
+ template<typename Iterator, typename Body> static inline
+ void parallel_do( Iterator first, Iterator last, const Body& body )
+ {
+ for( ; first != last; ++first )
+ body(*first);
+ }
+
+ class Split {};
+
+ template<typename Body> static inline
+ void parallel_reduce( const BlockedRange& range, Body& body )
+ {
+ body(range);
+ }
+#endif
+
+ // Returns a static string if there is a parallel framework,
+ // NULL otherwise.
+ CV_EXPORTS const char* currentParallelFramework();
+} //namespace cv
+
+#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
+ static ::cv::Algorithm* create##classname() \
+ { \
+ return new classname; \
+ } \
+ \
+ static ::cv::AlgorithmInfo& classname##_info() \
+ { \
+ static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname); \
+ return classname##_info_var; \
+ } \
+ \
+ static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \
+ \
+ ::cv::AlgorithmInfo* classname::info() const \
+ { \
+ static volatile bool initialized = false; \
+ \
+ if( !initialized ) \
+ { \
+ initialized = true; \
+ classname obj; \
+ memberinit; \
+ } \
+ return &classname##_info(); \
+ }
+
+#endif //__cplusplus
+
+/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */
+#define CV_MAX_INLINE_MAT_OP_SIZE 10
+
+/* maximal linear size of matrix to allocate it on stack. */
+#define CV_MAX_LOCAL_MAT_SIZE 32
+
+/* maximal size of local memory storage */
+#define CV_MAX_LOCAL_SIZE \
+ (CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double))
+
+/* default image row align (in bytes) */
+#define CV_DEFAULT_IMAGE_ROW_ALIGN 4
+
+/* matrices are continuous by default */
+#define CV_DEFAULT_MAT_ROW_ALIGN 1
+
+/* maximum size of dynamic memory buffer.
+ cvAlloc reports an error if a larger block is requested. */
+#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2)))
+
+/* the alignment of all the allocated buffers */
+#define CV_MALLOC_ALIGN 16
+
+/* default alignment for dynamic data structures residing in storages. */
+#define CV_STRUCT_ALIGN ((int)sizeof(double))
+
+/* default storage block size */
+#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128)
+
+/* default memory block for sparse array elements */
+#define CV_SPARSE_MAT_BLOCK (1<<12)
+
+/* initial hash table size */
+#define CV_SPARSE_HASH_SIZE0 (1<<10)
+
+/* maximal average node_count/hash_size ratio beyond which hash table is resized */
+#define CV_SPARSE_HASH_RATIO 3
+
+/* max length of strings */
+#define CV_MAX_STRLEN 1024
+
+#if 0 /*def CV_CHECK_FOR_NANS*/
+# define CV_CHECK_NANS( arr ) cvCheckArray((arr))
+#else
+# define CV_CHECK_NANS( arr )
+#endif
+
+/****************************************************************************************\
+* Common declarations *
+\****************************************************************************************/
+
+#ifdef __GNUC__
+# define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+#elif defined _MSC_VER
+# define CV_DECL_ALIGNED(x) __declspec(align(x))
+#else
+# define CV_DECL_ALIGNED(x)
+#endif
+
+#ifndef CV_IMPL
+# define CV_IMPL CV_EXTERN_C
+#endif
+
+#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; }
+
+/* default step, set in case of continuous data
+ to work around checks for valid step in some ipp functions */
+#define CV_STUB_STEP (1 << 30)
+
+#define CV_SIZEOF_FLOAT ((int)sizeof(float))
+#define CV_SIZEOF_SHORT ((int)sizeof(short))
+
+#define CV_ORIGIN_TL 0
+#define CV_ORIGIN_BL 1
+
+/* IEEE754 constants and macros */
+#define CV_POS_INF 0x7f800000
+#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */
+#define CV_1F 0x3f800000
+#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0))
+#define CV_TOGGLE_DBL(x) \
+ ((x)^((int64)(x) < 0 ? CV_BIG_INT(0x7fffffffffffffff) : 0))
+
+#define CV_NOP(a) (a)
+#define CV_ADD(a, b) ((a) + (b))
+#define CV_SUB(a, b) ((a) - (b))
+#define CV_MUL(a, b) ((a) * (b))
+#define CV_AND(a, b) ((a) & (b))
+#define CV_OR(a, b) ((a) | (b))
+#define CV_XOR(a, b) ((a) ^ (b))
+#define CV_ANDN(a, b) (~(a) & (b))
+#define CV_ORN(a, b) (~(a) | (b))
+#define CV_SQR(a) ((a) * (a))
+
+#define CV_LT(a, b) ((a) < (b))
+#define CV_LE(a, b) ((a) <= (b))
+#define CV_EQ(a, b) ((a) == (b))
+#define CV_NE(a, b) ((a) != (b))
+#define CV_GT(a, b) ((a) > (b))
+#define CV_GE(a, b) ((a) >= (b))
+
+#define CV_NONZERO(a) ((a) != 0)
+#define CV_NONZERO_FLT(a) (((a)+(a)) != 0)
+
+/* general-purpose saturation macros */
+#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0)
+#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? (t) : (t) > 0 ? 127 : -128)
+#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0)
+#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 32767 : -32768)
+#define CV_CAST_32S(t) (int)(t)
+#define CV_CAST_64S(t) (int64)(t)
+#define CV_CAST_32F(t) (float)(t)
+#define CV_CAST_64F(t) (double)(t)
+
+#define CV_PASTE2(a,b) a##b
+#define CV_PASTE(a,b) CV_PASTE2(a,b)
+
+#define CV_EMPTY
+#define CV_MAKE_STR(a) #a
+
+#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x)))
+
+#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0])))
+
+#define cvUnsupportedFormat "Unsupported format"
+
+CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) )
+{
+ assert( (align & (align-1)) == 0 );
+ return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) );
+}
+
+CV_INLINE int cvAlign( int size, int align )
+{
+ assert( (align & (align-1)) == 0 && size < INT_MAX );
+ return (size + align - 1) & -align;
+}
+
+CV_INLINE CvSize cvGetMatSize( const CvMat* mat )
+{
+ CvSize size;
+ size.width = mat->cols;
+ size.height = mat->rows;
+ return size;
+}
+
+#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))
+#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n)))
+
+/****************************************************************************************\
+
+ Generic implementation of QuickSort algorithm.
+ ----------------------------------------------
+ Using this macro user can declare customized sort function that can be much faster
+ than built-in qsort function because of lower overhead on elements
+ comparison and exchange. The macro takes less_than (or LT) argument - a macro or function
+ that takes 2 arguments returns non-zero if the first argument should be before the second
+ one in the sorted sequence and zero otherwise.
+
+ Example:
+
+ Suppose that the task is to sort points by ascending of y coordinates and if
+ y's are equal x's should ascend.
+
+ The code is:
+ ------------------------------------------------------------------------------
+ #define cmp_pts( pt1, pt2 ) \
+ ((pt1).y < (pt2).y || ((pt1).y == (pt2).y && (pt1).x < (pt2).x))
+
+ [static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts )
+ ------------------------------------------------------------------------------
+
+ After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );"
+ is available to user.
+
+ aux is an additional parameter, which can be used when comparing elements.
+ The current implementation was derived from *BSD system qsort():
+
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+
+\****************************************************************************************/
+
+#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \
+void func_name( T *array, size_t total, user_data_type aux ) \
+{ \
+ int isort_thresh = 7; \
+ T t; \
+ int sp = 0; \
+ \
+ struct \
+ { \
+ T *lb; \
+ T *ub; \
+ } \
+ stack[48]; \
+ \
+ aux = aux; \
+ \
+ if( total <= 1 ) \
+ return; \
+ \
+ stack[0].lb = array; \
+ stack[0].ub = array + (total - 1); \
+ \
+ while( sp >= 0 ) \
+ { \
+ T* left = stack[sp].lb; \
+ T* right = stack[sp--].ub; \
+ \
+ for(;;) \
+ { \
+ int i, n = (int)(right - left) + 1, m; \
+ T* ptr; \
+ T* ptr2; \
+ \
+ if( n <= isort_thresh ) \
+ { \
+ insert_sort: \
+ for( ptr = left + 1; ptr <= right; ptr++ ) \
+ { \
+ for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \
+ CV_SWAP( ptr2[0], ptr2[-1], t ); \
+ } \
+ break; \
+ } \
+ else \
+ { \
+ T* left0; \
+ T* left1; \
+ T* right0; \
+ T* right1; \
+ T* pivot; \
+ T* a; \
+ T* b; \
+ T* c; \
+ int swap_cnt = 0; \
+ \
+ left0 = left; \
+ right0 = right; \
+ pivot = left + (n/2); \
+ \
+ if( n > 40 ) \
+ { \
+ int d = n / 8; \
+ a = left, b = left + d, c = left + 2*d; \
+ left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
+ \
+ a = pivot - d, b = pivot, c = pivot + d; \
+ pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
+ \
+ a = right - 2*d, b = right - d, c = right; \
+ right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
+ } \
+ \
+ a = left, b = pivot, c = right; \
+ pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \
+ if( pivot != left0 ) \
+ { \
+ CV_SWAP( *pivot, *left0, t ); \
+ pivot = left0; \
+ } \
+ left = left1 = left0 + 1; \
+ right = right1 = right0; \
+ \
+ for(;;) \
+ { \
+ while( left <= right && !LT(*pivot, *left) ) \
+ { \
+ if( !LT(*left, *pivot) ) \
+ { \
+ if( left > left1 ) \
+ CV_SWAP( *left1, *left, t ); \
+ swap_cnt = 1; \
+ left1++; \
+ } \
+ left++; \
+ } \
+ \
+ while( left <= right && !LT(*right, *pivot) ) \
+ { \
+ if( !LT(*pivot, *right) ) \
+ { \
+ if( right < right1 ) \
+ CV_SWAP( *right1, *right, t ); \
+ swap_cnt = 1; \
+ right1--; \
+ } \
+ right--; \
+ } \
+ \
+ if( left > right ) \
+ break; \
+ CV_SWAP( *left, *right, t ); \
+ swap_cnt = 1; \
+ left++; \
+ right--; \
+ } \
+ \
+ if( swap_cnt == 0 ) \
+ { \
+ left = left0, right = right0; \
+ goto insert_sort; \
+ } \
+ \
+ n = MIN( (int)(left1 - left0), (int)(left - left1) ); \
+ for( i = 0; i < n; i++ ) \
+ CV_SWAP( left0[i], left[i-n], t ); \
+ \
+ n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \
+ for( i = 0; i < n; i++ ) \
+ CV_SWAP( left[i], right0[i-n+1], t ); \
+ n = (int)(left - left1); \
+ m = (int)(right1 - right); \
+ if( n > 1 ) \
+ { \
+ if( m > 1 ) \
+ { \
+ if( n > m ) \
+ { \
+ stack[++sp].lb = left0; \
+ stack[sp].ub = left0 + n - 1; \
+ left = right0 - m + 1, right = right0; \
+ } \
+ else \
+ { \
+ stack[++sp].lb = right0 - m + 1; \
+ stack[sp].ub = right0; \
+ left = left0, right = left0 + n - 1; \
+ } \
+ } \
+ else \
+ left = left0, right = left0 + n - 1; \
+ } \
+ else if( m > 1 ) \
+ left = right0 - m + 1, right = right0; \
+ else \
+ break; \
+ } \
+ } \
+ } \
+}
+
+#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \
+ CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int )
+
+/****************************************************************************************\
+* Structures and macros for integration with IPP *
+\****************************************************************************************/
+
+/* IPP-compatible return codes */
+typedef enum CvStatus
+{
+ CV_BADMEMBLOCK_ERR = -113,
+ CV_INPLACE_NOT_SUPPORTED_ERR= -112,
+ CV_UNMATCHED_ROI_ERR = -111,
+ CV_NOTFOUND_ERR = -110,
+ CV_BADCONVERGENCE_ERR = -109,
+
+ CV_BADDEPTH_ERR = -107,
+ CV_BADROI_ERR = -106,
+ CV_BADHEADER_ERR = -105,
+ CV_UNMATCHED_FORMATS_ERR = -104,
+ CV_UNSUPPORTED_COI_ERR = -103,
+ CV_UNSUPPORTED_CHANNELS_ERR = -102,
+ CV_UNSUPPORTED_DEPTH_ERR = -101,
+ CV_UNSUPPORTED_FORMAT_ERR = -100,
+
+ CV_BADARG_ERR = -49, //ipp comp
+ CV_NOTDEFINED_ERR = -48, //ipp comp
+
+ CV_BADCHANNELS_ERR = -47, //ipp comp
+ CV_BADRANGE_ERR = -44, //ipp comp
+ CV_BADSTEP_ERR = -29, //ipp comp
+
+ CV_BADFLAG_ERR = -12,
+ CV_DIV_BY_ZERO_ERR = -11, //ipp comp
+ CV_BADCOEF_ERR = -10,
+
+ CV_BADFACTOR_ERR = -7,
+ CV_BADPOINT_ERR = -6,
+ CV_BADSCALE_ERR = -4,
+ CV_OUTOFMEM_ERR = -3,
+ CV_NULLPTR_ERR = -2,
+ CV_BADSIZE_ERR = -1,
+ CV_NO_ERR = 0,
+ CV_OK = CV_NO_ERR
+}
+CvStatus;
+
+#define CV_NOTHROW throw()
+
+typedef struct CvFuncTable
+{
+ void* fn_2d[CV_DEPTH_MAX];
+}
+CvFuncTable;
+
+typedef struct CvBigFuncTable
+{
+ void* fn_2d[CV_DEPTH_MAX*4];
+} CvBigFuncTable;
+
+#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \
+ (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \
+ (tab).fn_2d[CV_8S] = 0; \
+ (tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \
+ (tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \
+ (tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \
+ (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \
+ (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG
+
+#ifdef __cplusplus
+
+// < Deprecated
+
+class CV_EXPORTS CvOpenGlFuncTab
+{
+public:
+ virtual ~CvOpenGlFuncTab();
+
+ virtual void genBuffers(int n, unsigned int* buffers) const = 0;
+ virtual void deleteBuffers(int n, const unsigned int* buffers) const = 0;
+
+ virtual void bufferData(unsigned int target, ptrdiff_t size, const void* data, unsigned int usage) const = 0;
+ virtual void bufferSubData(unsigned int target, ptrdiff_t offset, ptrdiff_t size, const void* data) const = 0;
+
+ virtual void bindBuffer(unsigned int target, unsigned int buffer) const = 0;
+
+ virtual void* mapBuffer(unsigned int target, unsigned int access) const = 0;
+ virtual void unmapBuffer(unsigned int target) const = 0;
+
+ virtual void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const = 0;
+
+ virtual bool isGlContextInitialized() const = 0;
+};
+
+CV_EXPORTS void icvSetOpenGlFuncTab(const CvOpenGlFuncTab* tab);
+
+CV_EXPORTS bool icvCheckGlError(const char* file, const int line, const char* func = "");
+
+// >
+
+namespace cv { namespace ogl {
+CV_EXPORTS bool checkError(const char* file, const int line, const char* func = "");
+}}
+
+#define CV_CheckGlError() CV_DbgAssert( (cv::ogl::checkError(__FILE__, __LINE__, CV_Func)) )
+
+#endif //__cplusplus
+
+#endif // __OPENCV_CORE_INTERNAL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp
new file mode 100644
index 00000000..631c6980
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/mat.hpp
@@ -0,0 +1,2625 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__
+#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__
+
+#ifndef SKIP_INCLUDES
+#include <limits.h>
+#include <string.h>
+#endif // SKIP_INCLUDES
+
+#ifdef __cplusplus
+
+namespace cv
+{
+
+//////////////////////////////// Mat ////////////////////////////////
+
+inline void Mat::initEmpty()
+{
+ flags = MAGIC_VAL;
+ dims = rows = cols = 0;
+ data = datastart = dataend = datalimit = 0;
+ refcount = 0;
+ allocator = 0;
+}
+
+inline Mat::Mat() : size(&rows)
+{
+ initEmpty();
+}
+
+inline Mat::Mat(int _rows, int _cols, int _type) : size(&rows)
+{
+ initEmpty();
+ create(_rows, _cols, _type);
+}
+
+inline Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) : size(&rows)
+{
+ initEmpty();
+ create(_rows, _cols, _type);
+ *this = _s;
+}
+
+inline Mat::Mat(Size _sz, int _type) : size(&rows)
+{
+ initEmpty();
+ create( _sz.height, _sz.width, _type );
+}
+
+inline Mat::Mat(Size _sz, int _type, const Scalar& _s) : size(&rows)
+{
+ initEmpty();
+ create(_sz.height, _sz.width, _type);
+ *this = _s;
+}
+
+inline Mat::Mat(int _dims, const int* _sz, int _type) : size(&rows)
+{
+ initEmpty();
+ create(_dims, _sz, _type);
+}
+
+inline Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) : size(&rows)
+{
+ initEmpty();
+ create(_dims, _sz, _type);
+ *this = _s;
+}
+
+inline Mat::Mat(const Mat& m)
+ : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
+ refcount(m.refcount), datastart(m.datastart), dataend(m.dataend),
+ datalimit(m.datalimit), allocator(m.allocator), size(&rows)
+{
+ if( refcount )
+ CV_XADD(refcount, 1);
+ if( m.dims <= 2 )
+ {
+ step[0] = m.step[0]; step[1] = m.step[1];
+ }
+ else
+ {
+ dims = 0;
+ copySize(m);
+ }
+}
+
+inline Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step)
+ : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols),
+ data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0),
+ datalimit(0), allocator(0), size(&rows)
+{
+ size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz;
+ if( _step == AUTO_STEP )
+ {
+ _step = minstep;
+ flags |= CONTINUOUS_FLAG;
+ }
+ else
+ {
+ if( rows == 1 ) _step = minstep;
+ CV_DbgAssert( _step >= minstep );
+ flags |= _step == minstep ? CONTINUOUS_FLAG : 0;
+ }
+ step[0] = _step; step[1] = esz;
+ datalimit = datastart + _step*rows;
+ dataend = datalimit - _step + minstep;
+}
+
+inline Mat::Mat(Size _sz, int _type, void* _data, size_t _step)
+ : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width),
+ data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0),
+ datalimit(0), allocator(0), size(&rows)
+{
+ size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz;
+ if( _step == AUTO_STEP )
+ {
+ _step = minstep;
+ flags |= CONTINUOUS_FLAG;
+ }
+ else
+ {
+ if( rows == 1 ) _step = minstep;
+ CV_DbgAssert( _step >= minstep );
+ flags |= _step == minstep ? CONTINUOUS_FLAG : 0;
+ }
+ step[0] = _step; step[1] = esz;
+ datalimit = datastart + _step*rows;
+ dataend = datalimit - _step + minstep;
+}
+
+
+template<typename _Tp> inline Mat::Mat(const vector<_Tp>& vec, bool copyData)
+ : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
+ dims(2), rows((int)vec.size()), cols(1), data(0), refcount(0),
+ datastart(0), dataend(0), allocator(0), size(&rows)
+{
+ if(vec.empty())
+ return;
+ if( !copyData )
+ {
+ step[0] = step[1] = sizeof(_Tp);
+ data = datastart = (uchar*)&vec[0];
+ datalimit = dataend = datastart + rows*step[0];
+ }
+ else
+ Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this);
+}
+
+
+template<typename _Tp, int n> inline Mat::Mat(const Vec<_Tp, n>& vec, bool copyData)
+ : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
+ dims(2), rows(n), cols(1), data(0), refcount(0),
+ datastart(0), dataend(0), allocator(0), size(&rows)
+{
+ if( !copyData )
+ {
+ step[0] = step[1] = sizeof(_Tp);
+ data = datastart = (uchar*)vec.val;
+ datalimit = dataend = datastart + rows*step[0];
+ }
+ else
+ Mat(n, 1, DataType<_Tp>::type, (void*)vec.val).copyTo(*this);
+}
+
+
+template<typename _Tp, int m, int n> inline Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData)
+ : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
+ dims(2), rows(m), cols(n), data(0), refcount(0),
+ datastart(0), dataend(0), allocator(0), size(&rows)
+{
+ if( !copyData )
+ {
+ step[0] = cols*sizeof(_Tp);
+ step[1] = sizeof(_Tp);
+ data = datastart = (uchar*)M.val;
+ datalimit = dataend = datastart + rows*step[0];
+ }
+ else
+ Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this);
+}
+
+
+template<typename _Tp> inline Mat::Mat(const Point_<_Tp>& pt, bool copyData)
+ : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
+ dims(2), rows(2), cols(1), data(0), refcount(0),
+ datastart(0), dataend(0), allocator(0), size(&rows)
+{
+ if( !copyData )
+ {
+ step[0] = step[1] = sizeof(_Tp);
+ data = datastart = (uchar*)&pt.x;
+ datalimit = dataend = datastart + rows*step[0];
+ }
+ else
+ {
+ create(2, 1, DataType<_Tp>::type);
+ ((_Tp*)data)[0] = pt.x;
+ ((_Tp*)data)[1] = pt.y;
+ }
+}
+
+
+template<typename _Tp> inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData)
+ : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
+ dims(2), rows(3), cols(1), data(0), refcount(0),
+ datastart(0), dataend(0), allocator(0), size(&rows)
+{
+ if( !copyData )
+ {
+ step[0] = step[1] = sizeof(_Tp);
+ data = datastart = (uchar*)&pt.x;
+ datalimit = dataend = datastart + rows*step[0];
+ }
+ else
+ {
+ create(3, 1, DataType<_Tp>::type);
+ ((_Tp*)data)[0] = pt.x;
+ ((_Tp*)data)[1] = pt.y;
+ ((_Tp*)data)[2] = pt.z;
+ }
+}
+
+
+template<typename _Tp> inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer)
+ : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
+ dims(0), rows(0), cols(0), data(0), refcount(0),
+ datastart(0), dataend(0), allocator(0), size(&rows)
+{
+ *this = *commaInitializer;
+}
+
+inline Mat::~Mat()
+{
+ release();
+ if( step.p != step.buf )
+ fastFree(step.p);
+}
+
+inline Mat& Mat::operator = (const Mat& m)
+{
+ if( this != &m )
+ {
+ if( m.refcount )
+ CV_XADD(m.refcount, 1);
+ release();
+ flags = m.flags;
+ if( dims <= 2 && m.dims <= 2 )
+ {
+ dims = m.dims;
+ rows = m.rows;
+ cols = m.cols;
+ step[0] = m.step[0];
+ step[1] = m.step[1];
+ }
+ else
+ copySize(m);
+ data = m.data;
+ datastart = m.datastart;
+ dataend = m.dataend;
+ datalimit = m.datalimit;
+ refcount = m.refcount;
+ allocator = m.allocator;
+ }
+ return *this;
+}
+
+inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); }
+inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); }
+inline Mat Mat::rowRange(int startrow, int endrow) const
+ { return Mat(*this, Range(startrow, endrow), Range::all()); }
+inline Mat Mat::rowRange(const Range& r) const
+ { return Mat(*this, r, Range::all()); }
+inline Mat Mat::colRange(int startcol, int endcol) const
+ { return Mat(*this, Range::all(), Range(startcol, endcol)); }
+inline Mat Mat::colRange(const Range& r) const
+ { return Mat(*this, Range::all(), r); }
+
+inline Mat Mat::diag(const Mat& d)
+{
+ CV_Assert( d.cols == 1 || d.rows == 1 );
+ int len = d.rows + d.cols - 1;
+ Mat m(len, len, d.type(), Scalar(0)), md = m.diag();
+ if( d.cols == 1 )
+ d.copyTo(md);
+ else
+ transpose(d, md);
+ return m;
+}
+
+inline Mat Mat::clone() const
+{
+ Mat m;
+ copyTo(m);
+ return m;
+}
+
+inline void Mat::assignTo( Mat& m, int _type ) const
+{
+ if( _type < 0 )
+ m = *this;
+ else
+ convertTo(m, _type);
+}
+
+inline void Mat::create(int _rows, int _cols, int _type)
+{
+ _type &= TYPE_MASK;
+ if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data )
+ return;
+ int sz[] = {_rows, _cols};
+ create(2, sz, _type);
+}
+
+inline void Mat::create(Size _sz, int _type)
+{
+ create(_sz.height, _sz.width, _type);
+}
+
+inline void Mat::addref()
+{ if( refcount ) CV_XADD(refcount, 1); }
+
+inline void Mat::release()
+{
+ if( refcount && CV_XADD(refcount, -1) == 1 )
+ deallocate();
+ data = datastart = dataend = datalimit = 0;
+ for(int i = 0; i < dims; i++)
+ size.p[i] = 0;
+ refcount = 0;
+}
+
+inline Mat Mat::operator()( Range _rowRange, Range _colRange ) const
+{
+ return Mat(*this, _rowRange, _colRange);
+}
+
+inline Mat Mat::operator()( const Rect& roi ) const
+{ return Mat(*this, roi); }
+
+inline Mat Mat::operator()(const Range* ranges) const
+{
+ return Mat(*this, ranges);
+}
+
+inline Mat::operator CvMat() const
+{
+ CV_DbgAssert(dims <= 2);
+ CvMat m = cvMat(rows, dims == 1 ? 1 : cols, type(), data);
+ m.step = (int)step[0];
+ m.type = (m.type & ~CONTINUOUS_FLAG) | (flags & CONTINUOUS_FLAG);
+ return m;
+}
+
+inline bool Mat::isContinuous() const { return (flags & CONTINUOUS_FLAG) != 0; }
+inline bool Mat::isSubmatrix() const { return (flags & SUBMATRIX_FLAG) != 0; }
+inline size_t Mat::elemSize() const { return dims > 0 ? step.p[dims-1] : 0; }
+inline size_t Mat::elemSize1() const { return CV_ELEM_SIZE1(flags); }
+inline int Mat::type() const { return CV_MAT_TYPE(flags); }
+inline int Mat::depth() const { return CV_MAT_DEPTH(flags); }
+inline int Mat::channels() const { return CV_MAT_CN(flags); }
+inline size_t Mat::step1(int i) const { return step.p[i]/elemSize1(); }
+inline bool Mat::empty() const { return data == 0 || total() == 0; }
+inline size_t Mat::total() const
+{
+ if( dims <= 2 )
+ return (size_t)rows*cols;
+ size_t p = 1;
+ for( int i = 0; i < dims; i++ )
+ p *= size[i];
+ return p;
+}
+
+inline uchar* Mat::ptr(int y)
+{
+ CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );
+ return data + step.p[0]*y;
+}
+
+inline const uchar* Mat::ptr(int y) const
+{
+ CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );
+ return data + step.p[0]*y;
+}
+
+template<typename _Tp> inline _Tp* Mat::ptr(int y)
+{
+ CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );
+ return (_Tp*)(data + step.p[0]*y);
+}
+
+template<typename _Tp> inline const _Tp* Mat::ptr(int y) const
+{
+ CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) );
+ return (const _Tp*)(data + step.p[0]*y);
+}
+
+
+inline uchar* Mat::ptr(int i0, int i1)
+{
+ CV_DbgAssert( dims >= 2 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] );
+ return data + i0*step.p[0] + i1*step.p[1];
+}
+
+inline const uchar* Mat::ptr(int i0, int i1) const
+{
+ CV_DbgAssert( dims >= 2 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] );
+ return data + i0*step.p[0] + i1*step.p[1];
+}
+
+template<typename _Tp> inline _Tp* Mat::ptr(int i0, int i1)
+{
+ CV_DbgAssert( dims >= 2 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] );
+ return (_Tp*)(data + i0*step.p[0] + i1*step.p[1]);
+}
+
+template<typename _Tp> inline const _Tp* Mat::ptr(int i0, int i1) const
+{
+ CV_DbgAssert( dims >= 2 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] );
+ return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1]);
+}
+
+inline uchar* Mat::ptr(int i0, int i1, int i2)
+{
+ CV_DbgAssert( dims >= 3 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] &&
+ (unsigned)i2 < (unsigned)size.p[2] );
+ return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2];
+}
+
+inline const uchar* Mat::ptr(int i0, int i1, int i2) const
+{
+ CV_DbgAssert( dims >= 3 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] &&
+ (unsigned)i2 < (unsigned)size.p[2] );
+ return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2];
+}
+
+template<typename _Tp> inline _Tp* Mat::ptr(int i0, int i1, int i2)
+{
+ CV_DbgAssert( dims >= 3 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] &&
+ (unsigned)i2 < (unsigned)size.p[2] );
+ return (_Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]);
+}
+
+template<typename _Tp> inline const _Tp* Mat::ptr(int i0, int i1, int i2) const
+{
+ CV_DbgAssert( dims >= 3 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] &&
+ (unsigned)i2 < (unsigned)size.p[2] );
+ return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]);
+}
+
+inline uchar* Mat::ptr(const int* idx)
+{
+ int i, d = dims;
+ uchar* p = data;
+ CV_DbgAssert( d >= 1 && p );
+ for( i = 0; i < d; i++ )
+ {
+ CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] );
+ p += idx[i]*step.p[i];
+ }
+ return p;
+}
+
+inline const uchar* Mat::ptr(const int* idx) const
+{
+ int i, d = dims;
+ uchar* p = data;
+ CV_DbgAssert( d >= 1 && p );
+ for( i = 0; i < d; i++ )
+ {
+ CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] );
+ p += idx[i]*step.p[i];
+ }
+ return p;
+}
+
+template<typename _Tp> inline _Tp& Mat::at(int i0, int i1)
+{
+ CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
+ CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
+ return ((_Tp*)(data + step.p[0]*i0))[i1];
+}
+
+template<typename _Tp> inline const _Tp& Mat::at(int i0, int i1) const
+{
+ CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
+ CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
+ return ((const _Tp*)(data + step.p[0]*i0))[i1];
+}
+
+template<typename _Tp> inline _Tp& Mat::at(Point pt)
+{
+ CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] &&
+ (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
+ CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
+ return ((_Tp*)(data + step.p[0]*pt.y))[pt.x];
+}
+
+template<typename _Tp> inline const _Tp& Mat::at(Point pt) const
+{
+ CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] &&
+ (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) &&
+ CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
+ return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x];
+}
+
+template<typename _Tp> inline _Tp& Mat::at(int i0)
+{
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) &&
+ elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ if( isContinuous() || size.p[0] == 1 )
+ return ((_Tp*)data)[i0];
+ if( size.p[1] == 1 )
+ return *(_Tp*)(data + step.p[0]*i0);
+ int i = i0/cols, j = i0 - i*cols;
+ return ((_Tp*)(data + step.p[0]*i))[j];
+}
+
+template<typename _Tp> inline const _Tp& Mat::at(int i0) const
+{
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) &&
+ elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ if( isContinuous() || size.p[0] == 1 )
+ return ((const _Tp*)data)[i0];
+ if( size.p[1] == 1 )
+ return *(const _Tp*)(data + step.p[0]*i0);
+ int i = i0/cols, j = i0 - i*cols;
+ return ((const _Tp*)(data + step.p[0]*i))[j];
+}
+
+template<typename _Tp> inline _Tp& Mat::at(int i0, int i1, int i2)
+{
+ CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ return *(_Tp*)ptr(i0, i1, i2);
+}
+template<typename _Tp> inline const _Tp& Mat::at(int i0, int i1, int i2) const
+{
+ CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ return *(const _Tp*)ptr(i0, i1, i2);
+}
+template<typename _Tp> inline _Tp& Mat::at(const int* idx)
+{
+ CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ return *(_Tp*)ptr(idx);
+}
+template<typename _Tp> inline const _Tp& Mat::at(const int* idx) const
+{
+ CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ return *(const _Tp*)ptr(idx);
+}
+template<typename _Tp, int n> _Tp& Mat::at(const Vec<int, n>& idx)
+{
+ CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ return *(_Tp*)ptr(idx.val);
+}
+template<typename _Tp, int n> inline const _Tp& Mat::at(const Vec<int, n>& idx) const
+{
+ CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
+ return *(const _Tp*)ptr(idx.val);
+}
+
+
+template<typename _Tp> inline MatConstIterator_<_Tp> Mat::begin() const
+{
+ CV_DbgAssert( elemSize() == sizeof(_Tp) );
+ return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this);
+}
+
+template<typename _Tp> inline MatConstIterator_<_Tp> Mat::end() const
+{
+ CV_DbgAssert( elemSize() == sizeof(_Tp) );
+ MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this);
+ it += total();
+ return it;
+}
+
+template<typename _Tp> inline MatIterator_<_Tp> Mat::begin()
+{
+ CV_DbgAssert( elemSize() == sizeof(_Tp) );
+ return MatIterator_<_Tp>((Mat_<_Tp>*)this);
+}
+
+template<typename _Tp> inline MatIterator_<_Tp> Mat::end()
+{
+ CV_DbgAssert( elemSize() == sizeof(_Tp) );
+ MatIterator_<_Tp> it((Mat_<_Tp>*)this);
+ it += total();
+ return it;
+}
+
+template<typename _Tp> inline Mat::operator vector<_Tp>() const
+{
+ vector<_Tp> v;
+ copyTo(v);
+ return v;
+}
+
+template<typename _Tp, int n> inline Mat::operator Vec<_Tp, n>() const
+{
+ CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) &&
+ rows + cols - 1 == n && channels() == 1 );
+
+ if( isContinuous() && type() == DataType<_Tp>::type )
+ return Vec<_Tp, n>((_Tp*)data);
+ Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val);
+ convertTo(tmp, tmp.type());
+ return v;
+}
+
+template<typename _Tp, int m, int n> inline Mat::operator Matx<_Tp, m, n>() const
+{
+ CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 );
+
+ if( isContinuous() && type() == DataType<_Tp>::type )
+ return Matx<_Tp, m, n>((_Tp*)data);
+ Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val);
+ convertTo(tmp, tmp.type());
+ return mtx;
+}
+
+
+template<typename _Tp> inline void Mat::push_back(const _Tp& elem)
+{
+ if( !data )
+ {
+ CV_Assert((type()==0) || (DataType<_Tp>::type == type()));
+
+ *this = Mat(1, 1, DataType<_Tp>::type, (void*)&elem).clone();
+ return;
+ }
+ CV_Assert(DataType<_Tp>::type == type() && cols == 1
+ /* && dims == 2 (cols == 1 implies dims == 2) */);
+ uchar* tmp = dataend + step[0];
+ if( !isSubmatrix() && isContinuous() && tmp <= datalimit )
+ {
+ *(_Tp*)(data + (size.p[0]++)*step.p[0]) = elem;
+ dataend = tmp;
+ }
+ else
+ push_back_(&elem);
+}
+
+template<typename _Tp> inline void Mat::push_back(const Mat_<_Tp>& m)
+{
+ push_back((const Mat&)m);
+}
+
+inline Mat::MSize::MSize(int* _p) : p(_p) {}
+inline Size Mat::MSize::operator()() const
+{
+ CV_DbgAssert(p[-1] <= 2);
+ return Size(p[1], p[0]);
+}
+inline const int& Mat::MSize::operator[](int i) const { return p[i]; }
+inline int& Mat::MSize::operator[](int i) { return p[i]; }
+inline Mat::MSize::operator const int*() const { return p; }
+
+inline bool Mat::MSize::operator == (const MSize& sz) const
+{
+ int d = p[-1], dsz = sz.p[-1];
+ if( d != dsz )
+ return false;
+ if( d == 2 )
+ return p[0] == sz.p[0] && p[1] == sz.p[1];
+
+ for( int i = 0; i < d; i++ )
+ if( p[i] != sz.p[i] )
+ return false;
+ return true;
+}
+
+inline bool Mat::MSize::operator != (const MSize& sz) const
+{
+ return !(*this == sz);
+}
+
+inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; }
+inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; }
+inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; }
+inline size_t& Mat::MStep::operator[](int i) { return p[i]; }
+inline Mat::MStep::operator size_t() const
+{
+ CV_DbgAssert( p == buf );
+ return buf[0];
+}
+inline Mat::MStep& Mat::MStep::operator = (size_t s)
+{
+ CV_DbgAssert( p == buf );
+ buf[0] = s;
+ return *this;
+}
+
+static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0)
+{
+ return cvarrToMat(arr, copyData, true, coiMode);
+}
+
+///////////////////////////////////////////// SVD //////////////////////////////////////////////////////
+
+inline SVD::SVD() {}
+inline SVD::SVD( InputArray m, int flags ) { operator ()(m, flags); }
+inline void SVD::solveZ( InputArray m, OutputArray _dst )
+{
+ Mat mtx = m.getMat();
+ SVD svd(mtx, (mtx.rows >= mtx.cols ? 0 : SVD::FULL_UV));
+ _dst.create(svd.vt.cols, 1, svd.vt.type());
+ Mat dst = _dst.getMat();
+ svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst);
+}
+
+template<typename _Tp, int m, int n, int nm> inline void
+ SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt )
+{
+ assert( nm == MIN(m, n));
+ Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false);
+ SVD::compute(_a, _w, _u, _vt);
+ CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]);
+}
+
+template<typename _Tp, int m, int n, int nm> inline void
+SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w )
+{
+ assert( nm == MIN(m, n));
+ Mat _a(a, false), _w(w, false);
+ SVD::compute(_a, _w);
+ CV_Assert(_w.data == (uchar*)&w.val[0]);
+}
+
+template<typename _Tp, int m, int n, int nm, int nb> inline void
+SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u,
+ const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs,
+ Matx<_Tp, n, nb>& dst )
+{
+ assert( nm == MIN(m, n));
+ Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false);
+ SVD::backSubst(_w, _u, _vt, _rhs, _dst);
+ CV_Assert(_dst.data == (uchar*)&dst.val[0]);
+}
+
+///////////////////////////////// Mat_<_Tp> ////////////////////////////////////
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_() // empty matrix; only stamps the static element type into flags
+ : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; }
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(int _rows, int _cols)
+ : Mat(_rows, _cols, DataType<_Tp>::type) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) // allocates then fills via operator=(_Tp)
+ : Mat(_rows, _cols, DataType<_Tp>::type) { *this = value; }
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(Size _sz) // note: Size is (width, height) -> Mat(rows=height, cols=width)
+ : Mat(_sz.height, _sz.width, DataType<_Tp>::type) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value)
+ : Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; }
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(int _dims, const int* _sz) // n-dimensional matrix
+ : Mat(_dims, _sz, DataType<_Tp>::type) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s)
+ : Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, _Tp* _data, const size_t* _steps) // wraps external data, no copy
+ : Mat(_dims, _sz, DataType<_Tp>::type, _data, _steps) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) // n-dim sub-matrix view
+ : Mat(m, ranges) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat& m) // converting constructor; delegates to operator=(const Mat&)
+ : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; }
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_& m) // shallow copy (shared data, refcounted)
+ : Mat(m) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) // wraps external 2-D buffer, no copy
+ : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& _rowRange, const Range& _colRange) // 2-D sub-matrix view
+ : Mat(m, _rowRange, _colRange) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) // ROI view
+ : Mat(m, roi) {}
+
+template<typename _Tp> template<int n> inline
+ Mat_<_Tp>::Mat_(const Vec<typename DataType<_Tp>::channel_type, n>& vec, bool copyData) // view over a Vec; optional deep copy
+ : Mat(n/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec)
+{
+ CV_Assert(n%DataType<_Tp>::channels == 0); // vec length must map to a whole number of _Tp elements
+ if( copyData )
+ *this = clone();
+}
+
+template<typename _Tp> template<int m, int n> inline
+ Mat_<_Tp>::Mat_(const Matx<typename DataType<_Tp>::channel_type,m,n>& M, bool copyData) // view over a Matx; optional deep copy
+ : Mat(m, n/DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M)
+{
+ CV_Assert(n % DataType<_Tp>::channels == 0);
+ if( copyData )
+ *this = clone();
+}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Point_<typename DataType<_Tp>::channel_type>& pt, bool copyData) // view over a 2-component point
+ : Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt)
+{
+ CV_Assert(2 % DataType<_Tp>::channels == 0);
+ if( copyData )
+ *this = clone();
+}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Point3_<typename DataType<_Tp>::channel_type>& pt, bool copyData) // view over a 3-component point
+ : Mat(3/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt)
+{
+ CV_Assert(3 % DataType<_Tp>::channels == 0);
+ if( copyData )
+ *this = clone();
+}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) // from "m << v1, v2, ..." initializer
+ : Mat(commaInitializer) {}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData) // from std::vector; copyData=false wraps the vector's buffer
+ : Mat(vec, copyData) {}
+
+template<typename _Tp> inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) // assign a plain Mat: share, reshape, or convert as needed
+{
+ if( DataType<_Tp>::type == m.type() ) // exact type match: plain shallow assignment
+ {
+ Mat::operator = (m);
+ return *this;
+ }
+ if( DataType<_Tp>::depth == m.depth() ) // same depth, different channel count: reinterpret channels
+ {
+ return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0));
+ }
+ CV_DbgAssert(DataType<_Tp>::channels == m.channels()); // different depth: channel counts must match for conversion
+ m.convertTo(*this, type()); // deep copy with element conversion
+ return *this;
+}
+
+template<typename _Tp> inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) // shallow copy from same-typed matrix
+{
+ Mat::operator=(m);
+ return *this;
+}
+
+template<typename _Tp> inline Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) // fill every element with s
+{
+ typedef typename DataType<_Tp>::vec_type VT;
+ Mat::operator=(Scalar((const VT&)s)); // reinterpret s as the channel vector, then set via Scalar
+ return *this;
+}
+
+template<typename _Tp> inline void Mat_<_Tp>::create(int _rows, int _cols) // (re)allocate with the statically known type
+{
+ Mat::create(_rows, _cols, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline void Mat_<_Tp>::create(Size _sz)
+{
+ Mat::create(_sz, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline void Mat_<_Tp>::create(int _dims, const int* _sz) // n-dimensional variant
+{
+ Mat::create(_dims, _sz, DataType<_Tp>::type);
+}
+
+
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const // 3-element cross product, typed wrapper over Mat::cross
+{ return Mat_<_Tp>(Mat::cross(m)); }
+
+template<typename _Tp> template<typename T2> inline Mat_<_Tp>::operator Mat_<T2>() const // element-type conversion via Mat_<T2>(Mat) ctor
+{ return Mat_<T2>(*this); }
+
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::row(int y) const // single-row view
+{ return Mat_(*this, Range(y, y+1), Range::all()); }
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::col(int x) const // single-column view
+{ return Mat_(*this, Range::all(), Range(x, x+1)); }
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::diag(int d) const // d-th diagonal view
+{ return Mat_(Mat::diag(d)); }
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::clone() const // deep copy
+{ return Mat_(Mat::clone()); }
+
+template<typename _Tp> inline size_t Mat_<_Tp>::elemSize() const // compile-time constant; debug-checked against runtime header
+{
+ CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) );
+ return sizeof(_Tp);
+}
+
+template<typename _Tp> inline size_t Mat_<_Tp>::elemSize1() const // per-channel size
+{
+ CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp)/DataType<_Tp>::channels );
+ return sizeof(_Tp)/DataType<_Tp>::channels;
+}
+template<typename _Tp> inline int Mat_<_Tp>::type() const // static type; debug-checked against the header
+{
+ CV_DbgAssert( Mat::type() == DataType<_Tp>::type );
+ return DataType<_Tp>::type;
+}
+template<typename _Tp> inline int Mat_<_Tp>::depth() const
+{
+ CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth );
+ return DataType<_Tp>::depth;
+}
+template<typename _Tp> inline int Mat_<_Tp>::channels() const
+{
+ CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels );
+ return DataType<_Tp>::channels;
+}
+template<typename _Tp> inline size_t Mat_<_Tp>::stepT(int i) const { return step.p[i]/elemSize(); } // step in whole _Tp elements
+template<typename _Tp> inline size_t Mat_<_Tp>::step1(int i) const { return step.p[i]/elemSize1(); } // step in single channels
+
+template<typename _Tp> inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) // grow/shrink ROI in place
+{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); }
+
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& _rowRange, const Range& _colRange ) const // sub-matrix view
+{ return Mat_<_Tp>(*this, _rowRange, _colRange); }
+
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const // ROI view
+{ return Mat_<_Tp>(*this, roi); }
+
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const // n-dim sub-matrix view
+{ return Mat_<_Tp>(*this, ranges); }
+
+template<typename _Tp> inline _Tp* Mat_<_Tp>::operator [](int y) // typed pointer to row y
+{ return (_Tp*)ptr(y); }
+template<typename _Tp> inline const _Tp* Mat_<_Tp>::operator [](int y) const
+{ return (const _Tp*)ptr(y); }
+
+template<typename _Tp> inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1) // element access (row, col); bounds-checked in debug builds only
+{
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] && // unsigned casts also reject negative indices
+ type() == DataType<_Tp>::type );
+ return ((_Tp*)(data + step.p[0]*i0))[i1];
+}
+
+template<typename _Tp> inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const
+{
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)i0 < (unsigned)size.p[0] &&
+ (unsigned)i1 < (unsigned)size.p[1] &&
+ type() == DataType<_Tp>::type );
+ return ((const _Tp*)(data + step.p[0]*i0))[i1];
+}
+
+template<typename _Tp> inline _Tp& Mat_<_Tp>::operator ()(Point pt) // element access by point: pt.y is the row, pt.x the column
+{
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)pt.y < (unsigned)size.p[0] &&
+ (unsigned)pt.x < (unsigned)size.p[1] &&
+ type() == DataType<_Tp>::type );
+ return ((_Tp*)(data + step.p[0]*pt.y))[pt.x];
+}
+
+template<typename _Tp> inline const _Tp& Mat_<_Tp>::operator ()(Point pt) const
+{
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)pt.y < (unsigned)size.p[0] &&
+ (unsigned)pt.x < (unsigned)size.p[1] &&
+ type() == DataType<_Tp>::type );
+ return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x];
+}
+
+template<typename _Tp> inline _Tp& Mat_<_Tp>::operator ()(const int* idx) // n-dimensional index; delegates to Mat::at
+{
+ return Mat::at<_Tp>(idx);
+}
+
+template<typename _Tp> inline const _Tp& Mat_<_Tp>::operator ()(const int* idx) const
+{
+ return Mat::at<_Tp>(idx);
+}
+
+template<typename _Tp> template<int n> inline _Tp& Mat_<_Tp>::operator ()(const Vec<int, n>& idx) // fixed-length index vector
+{
+ return Mat::at<_Tp>(idx);
+}
+
+template<typename _Tp> template<int n> inline const _Tp& Mat_<_Tp>::operator ()(const Vec<int, n>& idx) const
+{
+ return Mat::at<_Tp>(idx);
+}
+
+template<typename _Tp> inline _Tp& Mat_<_Tp>::operator ()(int i0) // 1-D (flattened) access
+{
+ return this->at<_Tp>(i0);
+}
+
+template<typename _Tp> inline const _Tp& Mat_<_Tp>::operator ()(int i0) const
+{
+ return this->at<_Tp>(i0);
+}
+
+template<typename _Tp> inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) // 3-D access
+{
+ return this->at<_Tp>(i0, i1, i2);
+}
+
+template<typename _Tp> inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const
+{
+ return this->at<_Tp>(i0, i1, i2);
+}
+
+
+template<typename _Tp> inline Mat_<_Tp>::operator vector<_Tp>() const // deep copy into an std::vector
+{
+ vector<_Tp> v;
+ copyTo(v);
+ return v;
+}
+
+template<typename _Tp> template<int n> inline Mat_<_Tp>::operator Vec<typename DataType<_Tp>::channel_type, n>() const // copy into a fixed-size Vec
+{
+ CV_Assert(n % DataType<_Tp>::channels == 0); // Vec length must cover whole elements
+ return this->Mat::operator Vec<typename DataType<_Tp>::channel_type, n>();
+}
+
+template<typename _Tp> template<int m, int n> inline Mat_<_Tp>::operator Matx<typename DataType<_Tp>::channel_type, m, n>() const // copy into a fixed-size Matx
+{
+ CV_Assert(n % DataType<_Tp>::channels == 0);
+
+ Matx<typename DataType<_Tp>::channel_type, m, n> res = this->Mat::operator Matx<typename DataType<_Tp>::channel_type, m, n>();
+ return res;
+}
+
+template<typename T1, typename T2, typename Op> inline void
+process( const Mat_<T1>& m1, Mat_<T2>& m2, Op op ) // element-wise unary map: m2(y,x) = op(m1(y,x))
+{
+ int y, x, rows = m1.rows, cols = m1.cols;
+
+ CV_DbgAssert( m1.size() == m2.size() ); // shapes must already match; m2 is not resized here
+
+ for( y = 0; y < rows; y++ )
+ {
+ const T1* src = m1[y]; // raw typed row pointers
+ T2* dst = m2[y];
+
+ for( x = 0; x < cols; x++ )
+ dst[x] = op(src[x]);
+ }
+}
+
+template<typename T1, typename T2, typename T3, typename Op> inline void
+process( const Mat_<T1>& m1, const Mat_<T2>& m2, Mat_<T3>& m3, Op op ) // element-wise binary map: m3(y,x) = op(m1(y,x), m2(y,x))
+{
+ int y, x, rows = m1.rows, cols = m1.cols;
+
+ CV_DbgAssert( m1.size() == m2.size() ); // note: m3's size is assumed to match as well
+
+ for( y = 0; y < rows; y++ )
+ {
+ const T1* src1 = m1[y];
+ const T2* src2 = m2[y];
+ T3* dst = m3[y];
+
+ for( x = 0; x < cols; x++ )
+ dst[x] = op( src1[x], src2[x] );
+ }
+}
+
+
+/////////////////////////////// Input/Output Arrays /////////////////////////////////
+
+template<typename _Tp> inline _InputArray::_InputArray(const vector<_Tp>& vec) // proxy over std::vector; stores only flags + pointer
+ : flags(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {}
+
+template<typename _Tp> inline _InputArray::_InputArray(const vector<vector<_Tp> >& vec) // proxy over vector-of-vectors
+ : flags(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {}
+
+template<typename _Tp> inline _InputArray::_InputArray(const vector<Mat_<_Tp> >& vec) // proxy over vector of typed matrices
+ : flags(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type), obj((void*)&vec) {}
+
+template<typename _Tp, int m, int n> inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) // fixed-size matrix; sz records (cols, rows)
+ : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {}
+
+template<typename _Tp> inline _InputArray::_InputArray(const _Tp* vec, int n) // raw pointer treated as an n x 1 matrix
+ : flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)vec), sz(n, 1) {}
+
+inline _InputArray::_InputArray(const Scalar& s) // Scalar viewed as a 1 x 4 CV_64F matrix
+ : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&s), sz(1, 4) {}
+
+template<typename _Tp> inline _InputArray::_InputArray(const Mat_<_Tp>& m) // typed matrix: type is fixed, size is not
+ : flags(FIXED_TYPE + MAT + DataType<_Tp>::type), obj((void*)&m) {}
+
+template<typename _Tp> inline _OutputArray::_OutputArray(vector<_Tp>& vec) // writable proxies simply reuse the _InputArray encodings
+ : _InputArray(vec) {}
+template<typename _Tp> inline _OutputArray::_OutputArray(vector<vector<_Tp> >& vec)
+ : _InputArray(vec) {}
+template<typename _Tp> inline _OutputArray::_OutputArray(vector<Mat_<_Tp> >& vec)
+ : _InputArray(vec) {}
+template<typename _Tp> inline _OutputArray::_OutputArray(Mat_<_Tp>& m)
+ : _InputArray(m) {}
+template<typename _Tp, int m, int n> inline _OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx)
+ : _InputArray(mtx) {}
+template<typename _Tp> inline _OutputArray::_OutputArray(_Tp* vec, int n)
+ : _InputArray(vec, n) {}
+
+template<typename _Tp> inline _OutputArray::_OutputArray(const vector<_Tp>& vec) // const overloads: output may not be resized (FIXED_SIZE)
+ : _InputArray(vec) {flags |= FIXED_SIZE;}
+template<typename _Tp> inline _OutputArray::_OutputArray(const vector<vector<_Tp> >& vec)
+ : _InputArray(vec) {flags |= FIXED_SIZE;}
+template<typename _Tp> inline _OutputArray::_OutputArray(const vector<Mat_<_Tp> >& vec)
+ : _InputArray(vec) {flags |= FIXED_SIZE;}
+
+template<typename _Tp> inline _OutputArray::_OutputArray(const Mat_<_Tp>& m)
+ : _InputArray(m) {flags |= FIXED_SIZE;}
+template<typename _Tp, int m, int n> inline _OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx) // Matx is already FIXED_SIZE via _InputArray
+ : _InputArray(mtx) {}
+template<typename _Tp> inline _OutputArray::_OutputArray(const _Tp* vec, int n)
+ : _InputArray(vec, n) {}
+
+//////////////////////////////////// Matrix Expressions /////////////////////////////////////////
+
+class CV_EXPORTS MatOp // abstract strategy for lazy matrix expressions; each MatExpr holds a MatOp* that knows how to evaluate it
+{
+public:
+ MatOp() {};
+ virtual ~MatOp() {};
+
+ virtual bool elementWise(const MatExpr& expr) const; // true if expr can be combined element-wise with others
+ virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; // the only pure-virtual: evaluate expr into m
+ virtual void roi(const MatExpr& expr, const Range& rowRange,
+ const Range& colRange, MatExpr& res) const;
+ virtual void diag(const MatExpr& expr, int d, MatExpr& res) const;
+ virtual void augAssignAdd(const MatExpr& expr, Mat& m) const; // m += expr and friends below
+ virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const;
+ virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const;
+ virtual void augAssignDivide(const MatExpr& expr, Mat& m) const;
+ virtual void augAssignAnd(const MatExpr& expr, Mat& m) const;
+ virtual void augAssignOr(const MatExpr& expr, Mat& m) const;
+ virtual void augAssignXor(const MatExpr& expr, Mat& m) const;
+
+ virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; // build combined expressions without evaluating
+ virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const;
+
+ virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;
+ virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const;
+
+ virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const;
+ virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const;
+
+ virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const;
+ virtual void divide(double s, const MatExpr& expr, MatExpr& res) const;
+
+ virtual void abs(const MatExpr& expr, MatExpr& res) const;
+
+ virtual void transpose(const MatExpr& expr, MatExpr& res) const;
+ virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;
+ virtual void invert(const MatExpr& expr, int method, MatExpr& res) const;
+
+ virtual Size size(const MatExpr& expr) const; // metadata of the (not yet evaluated) result
+ virtual int type(const MatExpr& expr) const;
+};
+
+
+class CV_EXPORTS MatExpr // lazy expression node: up to 3 Mat operands, 2 scalars, a Scalar, interpreted by *op
+{
+public:
+ MatExpr() : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s(Scalar()) {}
+ MatExpr(const MatOp* _op, int _flags, const Mat& _a=Mat(), const Mat& _b=Mat(),
+ const Mat& _c=Mat(), double _alpha=1, double _beta=1, const Scalar& _s=Scalar())
+ : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) {}
+ explicit MatExpr(const Mat& m);
+ operator Mat() const // implicit evaluation when a Mat is required
+ {
+ Mat m;
+ op->assign(*this, m);
+ return m;
+ }
+
+ template<typename _Tp> operator Mat_<_Tp>() const // evaluation with conversion to the requested element type
+ {
+ Mat_<_Tp> m;
+ op->assign(*this, m, DataType<_Tp>::type);
+ return m;
+ }
+
+ MatExpr row(int y) const; // these all delegate to op, still without evaluating
+ MatExpr col(int x) const;
+ MatExpr diag(int d=0) const;
+ MatExpr operator()( const Range& rowRange, const Range& colRange ) const;
+ MatExpr operator()( const Rect& roi ) const;
+
+ Mat cross(const Mat& m) const;
+ double dot(const Mat& m) const;
+
+ MatExpr t() const; // transpose
+ MatExpr inv(int method = DECOMP_LU) const; // inversion
+ MatExpr mul(const MatExpr& e, double scale=1) const; // element-wise multiply
+ MatExpr mul(const Mat& m, double scale=1) const;
+
+ Size size() const;
+ int type() const;
+
+ const MatOp* op; // evaluation strategy (not owned)
+ int flags;
+
+ Mat a, b, c; // operands; meaning depends on op
+ double alpha, beta; // scalar coefficients
+ Scalar s; // scalar term
+};
+
+
+CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); // arithmetic operators build lazy MatExpr nodes; nothing is computed here
+CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a);
+CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m);
+CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e);
+CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s);
+CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e);
+CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2);
+
+CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s);
+CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a);
+CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m);
+CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e);
+CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s);
+CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e);
+CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2);
+
+CV_EXPORTS MatExpr operator - (const Mat& m); // unary negation
+CV_EXPORTS MatExpr operator - (const MatExpr& e);
+
+CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); // matrix product (not element-wise; see MatExpr::mul)
+CV_EXPORTS MatExpr operator * (const Mat& a, double s);
+CV_EXPORTS MatExpr operator * (double s, const Mat& a);
+CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m);
+CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e);
+CV_EXPORTS MatExpr operator * (const MatExpr& e, double s);
+CV_EXPORTS MatExpr operator * (double s, const MatExpr& e);
+CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2);
+
+CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); // element-wise division
+CV_EXPORTS MatExpr operator / (const Mat& a, double s);
+CV_EXPORTS MatExpr operator / (double s, const Mat& a);
+CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m);
+CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e);
+CV_EXPORTS MatExpr operator / (const MatExpr& e, double s);
+CV_EXPORTS MatExpr operator / (double s, const MatExpr& e);
+CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2);
+
+CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b); // per-element comparisons producing 8-bit masks
+CV_EXPORTS MatExpr operator < (const Mat& a, double s);
+CV_EXPORTS MatExpr operator < (double s, const Mat& a);
+
+CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator <= (const Mat& a, double s);
+CV_EXPORTS MatExpr operator <= (double s, const Mat& a);
+
+CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator == (const Mat& a, double s);
+CV_EXPORTS MatExpr operator == (double s, const Mat& a);
+
+CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator != (const Mat& a, double s);
+CV_EXPORTS MatExpr operator != (double s, const Mat& a);
+
+CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator >= (const Mat& a, double s);
+CV_EXPORTS MatExpr operator >= (double s, const Mat& a);
+
+CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator > (const Mat& a, double s);
+CV_EXPORTS MatExpr operator > (double s, const Mat& a);
+
+CV_EXPORTS MatExpr min(const Mat& a, const Mat& b); // per-element min/max expressions
+CV_EXPORTS MatExpr min(const Mat& a, double s);
+CV_EXPORTS MatExpr min(double s, const Mat& a);
+
+CV_EXPORTS MatExpr max(const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr max(const Mat& a, double s);
+CV_EXPORTS MatExpr max(double s, const Mat& a);
+
+template<typename _Tp> static inline MatExpr min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) // typed overloads forward to the untyped versions
+{
+ return cv::min((const Mat&)a, (const Mat&)b);
+}
+
+template<typename _Tp> static inline MatExpr min(const Mat_<_Tp>& a, double s)
+{
+ return cv::min((const Mat&)a, s);
+}
+
+template<typename _Tp> static inline MatExpr min(double s, const Mat_<_Tp>& a) // min is commutative, so (a, s) order is reused
+{
+ return cv::min((const Mat&)a, s);
+}
+
+template<typename _Tp> static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ return cv::max((const Mat&)a, (const Mat&)b);
+}
+
+template<typename _Tp> static inline MatExpr max(const Mat_<_Tp>& a, double s)
+{
+ return cv::max((const Mat&)a, s);
+}
+
+template<typename _Tp> static inline MatExpr max(double s, const Mat_<_Tp>& a)
+{
+ return cv::max((const Mat&)a, s);
+}
+
+template<typename _Tp> static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) // eager 3-argument forms writing into c
+{
+ cv::min((const Mat&)a, (const Mat&)b, (Mat&)c);
+}
+
+template<typename _Tp> static inline void min(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c)
+{
+ cv::min((const Mat&)a, s, (Mat&)c);
+}
+
+template<typename _Tp> static inline void min(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c)
+{
+ cv::min((const Mat&)a, s, (Mat&)c);
+}
+
+template<typename _Tp> static inline void max(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c)
+{
+ cv::max((const Mat&)a, (const Mat&)b, (Mat&)c);
+}
+
+template<typename _Tp> static inline void max(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c)
+{
+ cv::max((const Mat&)a, s, (Mat&)c);
+}
+
+template<typename _Tp> static inline void max(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c)
+{
+ cv::max((const Mat&)a, s, (Mat&)c);
+}
+
+
+CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); // bitwise operators, also lazy
+CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s);
+CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a);
+
+CV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& s);
+CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a);
+
+CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b);
+CV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s);
+CV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a);
+
+CV_EXPORTS MatExpr operator ~(const Mat& m); // bitwise NOT
+
+CV_EXPORTS MatExpr abs(const Mat& m);
+CV_EXPORTS MatExpr abs(const MatExpr& e);
+
+template<typename _Tp> static inline MatExpr abs(const Mat_<_Tp>& m)
+{
+ return cv::abs((const Mat&)m);
+}
+
+////////////////////////////// Augmenting algebraic operations //////////////////////////////////
+
+inline Mat& Mat::operator = (const MatExpr& e) // evaluate the lazy expression into this matrix
+{
+ e.op->assign(e, *this);
+ return *this;
+}
+
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const MatExpr& e) // construct from an expression, converting to _Tp
+{
+ e.op->assign(e, *this, DataType<_Tp>::type);
+}
+
+template<typename _Tp> Mat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e) // assign an expression, converting to _Tp
+{
+ e.op->assign(e, *this, DataType<_Tp>::type);
+ return *this;
+}
+
+static inline Mat& operator += (const Mat& a, const Mat& b) // note: takes const Mat& and casts away const so temporaries/ROIs also bind
+{
+ add(a, b, (Mat&)a);
+ return (Mat&)a;
+}
+
+static inline Mat& operator += (const Mat& a, const Scalar& s)
+{
+ add(a, s, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ add(a, b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Scalar& s)
+{
+ add(a, s, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator += (const Mat& a, const MatExpr& b) // expression RHS: let its MatOp fold the add in
+{
+ b.op->augAssignAdd(b, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b)
+{
+ b.op->augAssignAdd(b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator -= (const Mat& a, const Mat& b)
+{
+ subtract(a, b, (Mat&)a);
+ return (Mat&)a;
+}
+
+static inline Mat& operator -= (const Mat& a, const Scalar& s)
+{
+ subtract(a, s, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ subtract(a, b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Scalar& s)
+{
+ subtract(a, s, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator -= (const Mat& a, const MatExpr& b)
+{
+ b.op->augAssignSubtract(b, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b)
+{
+ b.op->augAssignSubtract(b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator *= (const Mat& a, const Mat& b) // in-place matrix product: a = a*b via gemm
+{
+ gemm(a, b, 1, Mat(), 0, (Mat&)a, 0);
+ return (Mat&)a;
+}
+
+static inline Mat& operator *= (const Mat& a, double s) // scalar scaling via convertTo (same depth, scale s)
+{
+ a.convertTo((Mat&)a, -1, s);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ gemm(a, b, 1, Mat(), 0, (Mat&)a, 0);
+ return (Mat_<_Tp>&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, double s)
+{
+ a.convertTo((Mat&)a, -1, s);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator *= (const Mat& a, const MatExpr& b)
+{
+ b.op->augAssignMultiply(b, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b)
+{
+ b.op->augAssignMultiply(b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator /= (const Mat& a, const Mat& b) // element-wise in-place division
+{
+ divide(a, b, (Mat&)a);
+ return (Mat&)a;
+}
+
+static inline Mat& operator /= (const Mat& a, double s) // scalar division as scaling by 1/s
+{
+ a.convertTo((Mat&)a, -1, 1./s);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ divide(a, b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, double s)
+{
+ a.convertTo((Mat&)a, -1, 1./s);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator /= (const Mat& a, const MatExpr& b)
+{
+ b.op->augAssignDivide(b, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline
+Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b)
+{
+ b.op->augAssignDivide(b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+////////////////////////////// Logical operations ///////////////////////////////
+
+static inline Mat& operator &= (const Mat& a, const Mat& b) // in-place bitwise AND; const_cast pattern matches the arithmetic operators above
+{
+ bitwise_and(a, b, (Mat&)a);
+ return (Mat&)a;
+}
+
+static inline Mat& operator &= (const Mat& a, const Scalar& s)
+{
+ bitwise_and(a, s, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline Mat_<_Tp>&
+operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ bitwise_and(a, b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+template<typename _Tp> static inline Mat_<_Tp>&
+operator &= (const Mat_<_Tp>& a, const Scalar& s)
+{
+ bitwise_and(a, s, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator |= (const Mat& a, const Mat& b) // in-place bitwise OR
+{
+ bitwise_or(a, b, (Mat&)a);
+ return (Mat&)a;
+}
+
+static inline Mat& operator |= (const Mat& a, const Scalar& s)
+{
+ bitwise_or(a, s, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline Mat_<_Tp>&
+operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ bitwise_or(a, b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+template<typename _Tp> static inline Mat_<_Tp>&
+operator |= (const Mat_<_Tp>& a, const Scalar& s)
+{
+ bitwise_or(a, s, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+static inline Mat& operator ^= (const Mat& a, const Mat& b) // in-place bitwise XOR
+{
+ bitwise_xor(a, b, (Mat&)a);
+ return (Mat&)a;
+}
+
+static inline Mat& operator ^= (const Mat& a, const Scalar& s)
+{
+ bitwise_xor(a, s, (Mat&)a);
+ return (Mat&)a;
+}
+
+template<typename _Tp> static inline Mat_<_Tp>&
+operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
+{
+ bitwise_xor(a, b, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+template<typename _Tp> static inline Mat_<_Tp>&
+operator ^= (const Mat_<_Tp>& a, const Scalar& s)
+{
+ bitwise_xor(a, s, (Mat&)a);
+ return (Mat_<_Tp>&)a;
+}
+
+/////////////////////////////// Miscellaneous operations //////////////////////////////
+
+template<typename _Tp> void split(const Mat& src, vector<Mat_<_Tp> >& mv) // channel split into typed planes; reinterprets the vector as vector<Mat>
+{ split(src, (vector<Mat>&)mv ); }
+
+//////////////////////////////////////////////////////////////
+
+template<typename _Tp> inline MatExpr Mat_<_Tp>::zeros(int rows, int cols) // typed factory: lazy zero matrix expression
+{
+ return Mat::zeros(rows, cols, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline MatExpr Mat_<_Tp>::zeros(Size sz)
+{
+ return Mat::zeros(sz, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline MatExpr Mat_<_Tp>::ones(int rows, int cols) // lazy all-ones matrix expression
+{
+ return Mat::ones(rows, cols, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline MatExpr Mat_<_Tp>::ones(Size sz)
+{
+ return Mat::ones(sz, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline MatExpr Mat_<_Tp>::eye(int rows, int cols) // lazy identity matrix expression
+{
+ return Mat::eye(rows, cols, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline MatExpr Mat_<_Tp>::eye(Size sz)
+{
+ return Mat::eye(sz, DataType<_Tp>::type);
+}
+
+//////////////////////////////// Iterators & Comma initializers //////////////////////////////////
+
+inline MatConstIterator::MatConstIterator() // null iterator: no matrix attached
+ : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) {}
+
+inline MatConstIterator::MatConstIterator(const Mat* _m) // begin iterator over _m
+ : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0)
+{
+ if( m && m->isContinuous() ) // continuous data: the whole matrix is a single slice
+ {
+ sliceStart = m->data;
+ sliceEnd = sliceStart + m->total()*elemSize;
+ }
+ seek((const int*)0); // position at the first element (null index == origin)
+}
+
+inline MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) // iterator positioned at (_row, _col); 2-D matrices only
+ : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0)
+{
+ CV_Assert(m && m->dims <= 2);
+ if( m->isContinuous() )
+ {
+ sliceStart = m->data;
+ sliceEnd = sliceStart + m->total()*elemSize;
+ }
+ int idx[]={_row, _col};
+ seek(idx);
+}
+
+inline MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) // same as above with a Point (y=row, x=col)
+ : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0)
+{
+ CV_Assert(m && m->dims <= 2);
+ if( m->isContinuous() )
+ {
+ sliceStart = m->data;
+ sliceEnd = sliceStart + m->total()*elemSize;
+ }
+ int idx[]={_pt.y, _pt.x};
+ seek(idx);
+}
+
+inline MatConstIterator::MatConstIterator(const MatConstIterator& it) // plain member-wise copy
+ : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd)
+{}
+
+inline MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it )
+{
+ m = it.m; elemSize = it.elemSize; ptr = it.ptr;
+ sliceStart = it.sliceStart; sliceEnd = it.sliceEnd;
+ return *this;
+}
+
+inline uchar* MatConstIterator::operator *() const { return ptr; } // raw (untyped) pointer to the current element
+
+inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) // random access; falls back to seek() when leaving the current slice
+{
+ if( !m || ofs == 0 )
+ return *this;
+ ptrdiff_t ofsb = ofs*elemSize; // offset in bytes
+ ptr += ofsb;
+ if( ptr < sliceStart || sliceEnd <= ptr ) // crossed a slice (row) boundary of a non-continuous matrix
+ {
+ ptr -= ofsb; // undo, then let seek() handle the slice transition
+ seek(ofs, true);
+ }
+ return *this;
+}
+
+inline MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs)
+{ return (*this += -ofs); }
+
+inline MatConstIterator& MatConstIterator::operator --() // pre-decrement; fast path stays within the slice
+{
+ if( m && (ptr -= elemSize) < sliceStart )
+ {
+ ptr += elemSize; // undo and seek backward across the slice boundary
+ seek(-1, true);
+ }
+ return *this;
+}
+
+inline MatConstIterator MatConstIterator::operator --(int) // post-decrement returns the previous position
+{
+ MatConstIterator b = *this;
+ *this += -1;
+ return b;
+}
+
+inline MatConstIterator& MatConstIterator::operator ++() // pre-increment; fast path stays within the slice
+{
+ if( m && (ptr += elemSize) >= sliceEnd )
+ {
+ ptr -= elemSize;
+ seek(1, true);
+ }
+ return *this;
+}
+
+inline MatConstIterator MatConstIterator::operator ++(int) // post-increment returns the previous position
+{
+ MatConstIterator b = *this;
+ *this += 1;
+ return b;
+}
+
+template<typename _Tp> inline MatConstIterator_<_Tp>::MatConstIterator_() {} // typed iterator: thin wrappers over MatConstIterator
+
+template<typename _Tp> inline MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m)
+ : MatConstIterator(_m) {}
+
+template<typename _Tp> inline MatConstIterator_<_Tp>::
+ MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col)
+ : MatConstIterator(_m, _row, _col) {}
+
+template<typename _Tp> inline MatConstIterator_<_Tp>::
+ MatConstIterator_(const Mat_<_Tp>* _m, Point _pt)
+ : MatConstIterator(_m, _pt) {}
+
+template<typename _Tp> inline MatConstIterator_<_Tp>::
+ MatConstIterator_(const MatConstIterator_& it)
+ : MatConstIterator(it) {}
+
+template<typename _Tp> inline MatConstIterator_<_Tp>&
+ MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it )
+{
+ MatConstIterator::operator = (it);
+ return *this;
+}
+
+template<typename _Tp> inline _Tp MatConstIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } // dereference as _Tp (by value)
+
+template<typename _Tp> inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) // forward to base, return the derived type
+{
+ MatConstIterator::operator += (ofs);
+ return *this;
+}
+
+template<typename _Tp> inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs)
+{ return (*this += -ofs); }
+
+template<typename _Tp> inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --()
+{
+ MatConstIterator::operator --();
+ return *this;
+}
+
+template<typename _Tp> inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int) // post-decrement returns previous position
+{
+ MatConstIterator_ b = *this;
+ MatConstIterator::operator --();
+ return b;
+}
+
+template<typename _Tp> inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++()
+{
+ MatConstIterator::operator ++();
+ return *this;
+}
+
+template<typename _Tp> inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) // post-increment returns previous position
+{
+ MatConstIterator_ b = *this;
+ MatConstIterator::operator ++();
+ return b;
+}
+
+template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_() : MatConstIterator_<_Tp>() {}
+
+template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m)
+ : MatConstIterator_<_Tp>(_m) {}
+
+template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col)
+ : MatConstIterator_<_Tp>(_m, _row, _col) {}
+
+template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt)
+ : MatConstIterator_<_Tp>(_m, _pt) {}
+
+template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx)
+ : MatConstIterator_<_Tp>(_m, _idx) {}
+
+template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it)
+ : MatConstIterator_<_Tp>(it) {}
+
+template<typename _Tp> inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it )
+{
+ MatConstIterator::operator = (it);
+ return *this;
+}
+
+template<typename _Tp> inline _Tp& MatIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); }
+
+template<typename _Tp> inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs)
+{
+ MatConstIterator::operator += (ofs);
+ return *this;
+}
+
+template<typename _Tp> inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t ofs)
+{
+ MatConstIterator::operator += (-ofs);
+ return *this;
+}
+
+template<typename _Tp> inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator --()
+{
+ MatConstIterator::operator --();
+ return *this;
+}
+
+template<typename _Tp> inline MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int)
+{
+ MatIterator_ b = *this;
+ MatConstIterator::operator --();
+ return b;
+}
+
+template<typename _Tp> inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++()
+{
+ MatConstIterator::operator ++();
+ return *this;
+}
+
+template<typename _Tp> inline MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int)
+{
+ MatIterator_ b = *this;
+ MatConstIterator::operator ++();
+ return b;
+}
+
+template<typename _Tp> inline Point MatConstIterator_<_Tp>::pos() const
+{
+ if( !m )
+ return Point();
+ CV_DbgAssert( m->dims <= 2 );
+ if( m->isContinuous() )
+ {
+ ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data;
+ int y = (int)(ofs / m->cols), x = (int)(ofs - (ptrdiff_t)y*m->cols);
+ return Point(x, y);
+ }
+ else
+ {
+ ptrdiff_t ofs = (uchar*)ptr - m->data;
+ int y = (int)(ofs / m->step), x = (int)((ofs - y*m->step)/sizeof(_Tp));
+ return Point(x, y);
+ }
+}
+
+static inline bool
+operator == (const MatConstIterator& a, const MatConstIterator& b)
+{ return a.m == b.m && a.ptr == b.ptr; }
+
+template<typename _Tp> static inline bool
+operator != (const MatConstIterator& a, const MatConstIterator& b)
+{ return !(a == b); }
+
+template<typename _Tp> static inline bool
+operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b)
+{ return a.m == b.m && a.ptr == b.ptr; }
+
+template<typename _Tp> static inline bool
+operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b)
+{ return a.m != b.m || a.ptr != b.ptr; }
+
+template<typename _Tp> static inline bool
+operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b)
+{ return a.m == b.m && a.ptr == b.ptr; }
+
+template<typename _Tp> static inline bool
+operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b)
+{ return a.m != b.m || a.ptr != b.ptr; }
+
+static inline bool
+operator < (const MatConstIterator& a, const MatConstIterator& b)
+{ return a.ptr < b.ptr; }
+
+static inline bool
+operator > (const MatConstIterator& a, const MatConstIterator& b)
+{ return a.ptr > b.ptr; }
+
+static inline bool
+operator <= (const MatConstIterator& a, const MatConstIterator& b)
+{ return a.ptr <= b.ptr; }
+
+static inline bool
+operator >= (const MatConstIterator& a, const MatConstIterator& b)
+{ return a.ptr >= b.ptr; }
+
+CV_EXPORTS ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a);
+
+static inline MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs)
+{ MatConstIterator b = a; return b += ofs; }
+
+static inline MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a)
+{ MatConstIterator b = a; return b += ofs; }
+
+static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs)
+{ MatConstIterator b = a; return b += -ofs; }
+
+template<typename _Tp> static inline MatConstIterator_<_Tp>
+operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)
+{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; }
+
+template<typename _Tp> static inline MatConstIterator_<_Tp>
+operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a)
+{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; }
+
+template<typename _Tp> static inline MatConstIterator_<_Tp>
+operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)
+{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; }
+
+inline uchar* MatConstIterator::operator [](ptrdiff_t i) const
+{ return *(*this + i); }
+
+template<typename _Tp> inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const
+{ return *(_Tp*)MatConstIterator::operator [](i); }
+
+template<typename _Tp> static inline MatIterator_<_Tp>
+operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs)
+{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; }
+
+template<typename _Tp> static inline MatIterator_<_Tp>
+operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a)
+{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; }
+
+template<typename _Tp> static inline MatIterator_<_Tp>
+operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs)
+{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; }
+
+template<typename _Tp> inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const
+{ return *(*this + i); }
+
+template<typename _Tp> inline MatConstIterator_<_Tp> Mat_<_Tp>::begin() const
+{ return Mat::begin<_Tp>(); }
+
+template<typename _Tp> inline MatConstIterator_<_Tp> Mat_<_Tp>::end() const
+{ return Mat::end<_Tp>(); }
+
+template<typename _Tp> inline MatIterator_<_Tp> Mat_<_Tp>::begin()
+{ return Mat::begin<_Tp>(); }
+
+template<typename _Tp> inline MatIterator_<_Tp> Mat_<_Tp>::end()
+{ return Mat::end<_Tp>(); }
+
+template<typename _Tp> inline MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) : it(_m) {}
+
+template<typename _Tp> template<typename T2> inline MatCommaInitializer_<_Tp>&
+MatCommaInitializer_<_Tp>::operator , (T2 v)
+{
+ CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() );
+ *this->it = _Tp(v); ++this->it;
+ return *this;
+}
+
+template<typename _Tp> inline Mat_<_Tp> MatCommaInitializer_<_Tp>::operator *() const
+{
+ CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() );
+ return Mat_<_Tp>(*this->it.m);
+}
+
+template<typename _Tp> inline MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const
+{
+ CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() );
+ return Mat_<_Tp>(*this->it.m);
+}
+
+template<typename _Tp, typename T2> static inline MatCommaInitializer_<_Tp>
+operator << (const Mat_<_Tp>& m, T2 val)
+{
+ MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m);
+ return (commaInitializer, val);
+}
+
+//////////////////////////////// SparseMat ////////////////////////////////
+
+inline SparseMat::SparseMat()
+: flags(MAGIC_VAL), hdr(0)
+{
+}
+
+inline SparseMat::SparseMat(int _dims, const int* _sizes, int _type)
+: flags(MAGIC_VAL), hdr(0)
+{
+ create(_dims, _sizes, _type);
+}
+
+inline SparseMat::SparseMat(const SparseMat& m)
+: flags(m.flags), hdr(m.hdr)
+{
+ addref();
+}
+
+inline SparseMat::~SparseMat()
+{
+ release();
+}
+
+inline SparseMat& SparseMat::operator = (const SparseMat& m)
+{
+ if( this != &m )
+ {
+ if( m.hdr )
+ CV_XADD(&m.hdr->refcount, 1);
+ release();
+ flags = m.flags;
+ hdr = m.hdr;
+ }
+ return *this;
+}
+
+inline SparseMat& SparseMat::operator = (const Mat& m)
+{ return (*this = SparseMat(m)); }
+
+inline SparseMat SparseMat::clone() const
+{
+ SparseMat temp;
+ this->copyTo(temp);
+ return temp;
+}
+
+
+inline void SparseMat::assignTo( SparseMat& m, int _type ) const
+{
+ if( _type < 0 )
+ m = *this;
+ else
+ convertTo(m, _type);
+}
+
+inline void SparseMat::addref()
+{ if( hdr ) CV_XADD(&hdr->refcount, 1); }
+
+inline void SparseMat::release()
+{
+ if( hdr && CV_XADD(&hdr->refcount, -1) == 1 )
+ delete hdr;
+ hdr = 0;
+}
+
+inline size_t SparseMat::elemSize() const
+{ return CV_ELEM_SIZE(flags); }
+
+inline size_t SparseMat::elemSize1() const
+{ return CV_ELEM_SIZE1(flags); }
+
+inline int SparseMat::type() const
+{ return CV_MAT_TYPE(flags); }
+
+inline int SparseMat::depth() const
+{ return CV_MAT_DEPTH(flags); }
+
+inline int SparseMat::channels() const
+{ return CV_MAT_CN(flags); }
+
+inline const int* SparseMat::size() const
+{
+ return hdr ? hdr->size : 0;
+}
+
+inline int SparseMat::size(int i) const
+{
+ if( hdr )
+ {
+ CV_DbgAssert((unsigned)i < (unsigned)hdr->dims);
+ return hdr->size[i];
+ }
+ return 0;
+}
+
+inline int SparseMat::dims() const
+{
+ return hdr ? hdr->dims : 0;
+}
+
+inline size_t SparseMat::nzcount() const
+{
+ return hdr ? hdr->nodeCount : 0;
+}
+
+inline size_t SparseMat::hash(int i0) const
+{
+ return (size_t)i0;
+}
+
+inline size_t SparseMat::hash(int i0, int i1) const
+{
+ return (size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1;
+}
+
+inline size_t SparseMat::hash(int i0, int i1, int i2) const
+{
+ return ((size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1)*HASH_SCALE + (unsigned)i2;
+}
+
+inline size_t SparseMat::hash(const int* idx) const
+{
+ size_t h = (unsigned)idx[0];
+ if( !hdr )
+ return 0;
+ int i, d = hdr->dims;
+ for( i = 1; i < d; i++ )
+ h = h*HASH_SCALE + (unsigned)idx[i];
+ return h;
+}
+
+template<typename _Tp> inline _Tp& SparseMat::ref(int i0, size_t* hashval)
+{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); }
+
+template<typename _Tp> inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval)
+{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); }
+
+template<typename _Tp> inline _Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval)
+{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); }
+
+template<typename _Tp> inline _Tp& SparseMat::ref(const int* idx, size_t* hashval)
+{ return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); }
+
+template<typename _Tp> inline _Tp SparseMat::value(int i0, size_t* hashval) const
+{
+ const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval);
+ return p ? *p : _Tp();
+}
+
+template<typename _Tp> inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const
+{
+ const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval);
+ return p ? *p : _Tp();
+}
+
+template<typename _Tp> inline _Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const
+{
+ const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval);
+ return p ? *p : _Tp();
+}
+
+template<typename _Tp> inline _Tp SparseMat::value(const int* idx, size_t* hashval) const
+{
+ const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval);
+ return p ? *p : _Tp();
+}
+
+template<typename _Tp> inline const _Tp* SparseMat::find(int i0, size_t* hashval) const
+{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); }
+
+template<typename _Tp> inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const
+{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); }
+
+template<typename _Tp> inline const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const
+{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); }
+
+template<typename _Tp> inline const _Tp* SparseMat::find(const int* idx, size_t* hashval) const
+{ return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); }
+
+template<typename _Tp> inline _Tp& SparseMat::value(Node* n)
+{ return *(_Tp*)((uchar*)n + hdr->valueOffset); }
+
+template<typename _Tp> inline const _Tp& SparseMat::value(const Node* n) const
+{ return *(const _Tp*)((const uchar*)n + hdr->valueOffset); }
+
+inline SparseMat::Node* SparseMat::node(size_t nidx)
+{ return (Node*)(void*)&hdr->pool[nidx]; }
+
+inline const SparseMat::Node* SparseMat::node(size_t nidx) const
+{ return (const Node*)(void*)&hdr->pool[nidx]; }
+
+inline SparseMatIterator SparseMat::begin()
+{ return SparseMatIterator(this); }
+
+inline SparseMatConstIterator SparseMat::begin() const
+{ return SparseMatConstIterator(this); }
+
+inline SparseMatIterator SparseMat::end()
+{ SparseMatIterator it(this); it.seekEnd(); return it; }
+
+inline SparseMatConstIterator SparseMat::end() const
+{ SparseMatConstIterator it(this); it.seekEnd(); return it; }
+
+template<typename _Tp> inline SparseMatIterator_<_Tp> SparseMat::begin()
+{ return SparseMatIterator_<_Tp>(this); }
+
+template<typename _Tp> inline SparseMatConstIterator_<_Tp> SparseMat::begin() const
+{ return SparseMatConstIterator_<_Tp>(this); }
+
+template<typename _Tp> inline SparseMatIterator_<_Tp> SparseMat::end()
+{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; }
+
+template<typename _Tp> inline SparseMatConstIterator_<_Tp> SparseMat::end() const
+{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; }
+
+
+inline SparseMatConstIterator::SparseMatConstIterator()
+: m(0), hashidx(0), ptr(0)
+{
+}
+
+inline SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it)
+: m(it.m), hashidx(it.hashidx), ptr(it.ptr)
+{
+}
+
+static inline bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2)
+{ return it1.m == it2.m && it1.ptr == it2.ptr; }
+
+static inline bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2)
+{ return !(it1 == it2); }
+
+
+inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it)
+{
+ if( this != &it )
+ {
+ m = it.m;
+ hashidx = it.hashidx;
+ ptr = it.ptr;
+ }
+ return *this;
+}
+
+template<typename _Tp> inline const _Tp& SparseMatConstIterator::value() const
+{ return *(_Tp*)ptr; }
+
+inline const SparseMat::Node* SparseMatConstIterator::node() const
+{
+ return ptr && m && m->hdr ?
+ (const SparseMat::Node*)(void*)(ptr - m->hdr->valueOffset) : 0;
+}
+
+inline SparseMatConstIterator SparseMatConstIterator::operator ++(int)
+{
+ SparseMatConstIterator it = *this;
+ ++*this;
+ return it;
+}
+
+
+inline void SparseMatConstIterator::seekEnd()
+{
+ if( m && m->hdr )
+ {
+ hashidx = m->hdr->hashtab.size();
+ ptr = 0;
+ }
+}
+
+inline SparseMatIterator::SparseMatIterator()
+{}
+
+inline SparseMatIterator::SparseMatIterator(SparseMat* _m)
+: SparseMatConstIterator(_m)
+{}
+
+inline SparseMatIterator::SparseMatIterator(const SparseMatIterator& it)
+: SparseMatConstIterator(it)
+{
+}
+
+inline SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it)
+{
+ (SparseMatConstIterator&)*this = it;
+ return *this;
+}
+
+template<typename _Tp> inline _Tp& SparseMatIterator::value() const
+{ return *(_Tp*)ptr; }
+
+inline SparseMat::Node* SparseMatIterator::node() const
+{
+ return (SparseMat::Node*)SparseMatConstIterator::node();
+}
+
+inline SparseMatIterator& SparseMatIterator::operator ++()
+{
+ SparseMatConstIterator::operator ++();
+ return *this;
+}
+
+inline SparseMatIterator SparseMatIterator::operator ++(int)
+{
+ SparseMatIterator it = *this;
+ ++*this;
+ return it;
+}
+
+
+template<typename _Tp> inline SparseMat_<_Tp>::SparseMat_()
+{ flags = MAGIC_VAL | DataType<_Tp>::type; }
+
+template<typename _Tp> inline SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes)
+: SparseMat(_dims, _sizes, DataType<_Tp>::type)
+{}
+
+template<typename _Tp> inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m)
+{
+ if( m.type() == DataType<_Tp>::type )
+ *this = (const SparseMat_<_Tp>&)m;
+ else
+ m.convertTo(*this, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m)
+{
+ this->flags = m.flags;
+ this->hdr = m.hdr;
+ if( this->hdr )
+ CV_XADD(&this->hdr->refcount, 1);
+}
+
+template<typename _Tp> inline SparseMat_<_Tp>::SparseMat_(const Mat& m)
+{
+ SparseMat sm(m);
+ *this = sm;
+}
+
+template<typename _Tp> inline SparseMat_<_Tp>::SparseMat_(const CvSparseMat* m)
+{
+ SparseMat sm(m);
+ *this = sm;
+}
+
+template<typename _Tp> inline SparseMat_<_Tp>&
+SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m)
+{
+ if( this != &m )
+ {
+ if( m.hdr ) CV_XADD(&m.hdr->refcount, 1);
+ release();
+ flags = m.flags;
+ hdr = m.hdr;
+ }
+ return *this;
+}
+
+template<typename _Tp> inline SparseMat_<_Tp>&
+SparseMat_<_Tp>::operator = (const SparseMat& m)
+{
+ if( m.type() == DataType<_Tp>::type )
+ return (*this = (const SparseMat_<_Tp>&)m);
+ m.convertTo(*this, DataType<_Tp>::type);
+ return *this;
+}
+
+template<typename _Tp> inline SparseMat_<_Tp>&
+SparseMat_<_Tp>::operator = (const Mat& m)
+{ return (*this = SparseMat(m)); }
+
+template<typename _Tp> inline SparseMat_<_Tp>
+SparseMat_<_Tp>::clone() const
+{
+ SparseMat_<_Tp> m;
+ this->copyTo(m);
+ return m;
+}
+
+template<typename _Tp> inline void
+SparseMat_<_Tp>::create(int _dims, const int* _sizes)
+{
+ SparseMat::create(_dims, _sizes, DataType<_Tp>::type);
+}
+
+template<typename _Tp> inline
+SparseMat_<_Tp>::operator CvSparseMat*() const
+{
+ return SparseMat::operator CvSparseMat*();
+}
+
+template<typename _Tp> inline int SparseMat_<_Tp>::type() const
+{ return DataType<_Tp>::type; }
+
+template<typename _Tp> inline int SparseMat_<_Tp>::depth() const
+{ return DataType<_Tp>::depth; }
+
+template<typename _Tp> inline int SparseMat_<_Tp>::channels() const
+{ return DataType<_Tp>::channels; }
+
+template<typename _Tp> inline _Tp&
+SparseMat_<_Tp>::ref(int i0, size_t* hashval)
+{ return SparseMat::ref<_Tp>(i0, hashval); }
+
+template<typename _Tp> inline _Tp
+SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const
+{ return SparseMat::value<_Tp>(i0, hashval); }
+
+template<typename _Tp> inline _Tp&
+SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval)
+{ return SparseMat::ref<_Tp>(i0, i1, hashval); }
+
+template<typename _Tp> inline _Tp
+SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const
+{ return SparseMat::value<_Tp>(i0, i1, hashval); }
+
+template<typename _Tp> inline _Tp&
+SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval)
+{ return SparseMat::ref<_Tp>(i0, i1, i2, hashval); }
+
+template<typename _Tp> inline _Tp
+SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const
+{ return SparseMat::value<_Tp>(i0, i1, i2, hashval); }
+
+template<typename _Tp> inline _Tp&
+SparseMat_<_Tp>::ref(const int* idx, size_t* hashval)
+{ return SparseMat::ref<_Tp>(idx, hashval); }
+
+template<typename _Tp> inline _Tp
+SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const
+{ return SparseMat::value<_Tp>(idx, hashval); }
+
+template<typename _Tp> inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin()
+{ return SparseMatIterator_<_Tp>(this); }
+
+template<typename _Tp> inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const
+{ return SparseMatConstIterator_<_Tp>(this); }
+
+template<typename _Tp> inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end()
+{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; }
+
+template<typename _Tp> inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const
+{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; }
+
+template<typename _Tp> inline
+SparseMatConstIterator_<_Tp>::SparseMatConstIterator_()
+{}
+
+template<typename _Tp> inline
+SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m)
+: SparseMatConstIterator(_m)
+{}
+
+template<typename _Tp> inline
+SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat* _m)
+: SparseMatConstIterator(_m)
+{
+ CV_Assert( _m->type() == DataType<_Tp>::type );
+}
+
+template<typename _Tp> inline
+SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it)
+: SparseMatConstIterator(it)
+{}
+
+template<typename _Tp> inline SparseMatConstIterator_<_Tp>&
+SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it)
+{ return reinterpret_cast<SparseMatConstIterator_<_Tp>&>
+ (*reinterpret_cast<SparseMatConstIterator*>(this) =
+ reinterpret_cast<const SparseMatConstIterator&>(it)); }
+
+template<typename _Tp> inline const _Tp&
+SparseMatConstIterator_<_Tp>::operator *() const
+{ return *(const _Tp*)this->ptr; }
+
+template<typename _Tp> inline SparseMatConstIterator_<_Tp>&
+SparseMatConstIterator_<_Tp>::operator ++()
+{
+ SparseMatConstIterator::operator ++();
+ return *this;
+}
+
+template<typename _Tp> inline SparseMatConstIterator_<_Tp>
+SparseMatConstIterator_<_Tp>::operator ++(int)
+{
+ SparseMatConstIterator_<_Tp> it = *this;
+ SparseMatConstIterator::operator ++();
+ return it;
+}
+
+template<typename _Tp> inline
+SparseMatIterator_<_Tp>::SparseMatIterator_()
+{}
+
+template<typename _Tp> inline
+SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m)
+: SparseMatConstIterator_<_Tp>(_m)
+{}
+
+template<typename _Tp> inline
+SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat* _m)
+: SparseMatConstIterator_<_Tp>(_m)
+{}
+
+template<typename _Tp> inline
+SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it)
+: SparseMatConstIterator_<_Tp>(it)
+{}
+
+template<typename _Tp> inline SparseMatIterator_<_Tp>&
+SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it)
+{ return reinterpret_cast<SparseMatIterator_<_Tp>&>
+ (*reinterpret_cast<SparseMatConstIterator*>(this) =
+ reinterpret_cast<const SparseMatConstIterator&>(it)); }
+
+template<typename _Tp> inline _Tp&
+SparseMatIterator_<_Tp>::operator *() const
+{ return *(_Tp*)this->ptr; }
+
+template<typename _Tp> inline SparseMatIterator_<_Tp>&
+SparseMatIterator_<_Tp>::operator ++()
+{
+ SparseMatConstIterator::operator ++();
+ return *this;
+}
+
+template<typename _Tp> inline SparseMatIterator_<_Tp>
+SparseMatIterator_<_Tp>::operator ++(int)
+{
+ SparseMatIterator_<_Tp> it = *this;
+ SparseMatConstIterator::operator ++();
+ return it;
+}
+
+}
+
+#endif
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp
new file mode 100644
index 00000000..7ecaa8e2
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop.hpp
@@ -0,0 +1,284 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OPENGL_INTEROP_HPP__
+#define __OPENCV_OPENGL_INTEROP_HPP__
+
+#ifdef __cplusplus
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/opengl_interop_deprecated.hpp"
+
+namespace cv { namespace ogl {
+
+/////////////////// OpenGL Objects ///////////////////
+
+//! Smart pointer for OpenGL buffer memory with reference counting.
+class CV_EXPORTS Buffer
+{
+public:
+ enum Target
+ {
+ ARRAY_BUFFER = 0x8892, //!< The buffer will be used as a source for vertex data
+ ELEMENT_ARRAY_BUFFER = 0x8893, //!< The buffer will be used for indices (in glDrawElements, for example)
+ PIXEL_PACK_BUFFER = 0x88EB, //!< The buffer will be used for reading from OpenGL textures
+ PIXEL_UNPACK_BUFFER = 0x88EC //!< The buffer will be used for writing to OpenGL textures
+ };
+
+ enum Access
+ {
+ READ_ONLY = 0x88B8,
+ WRITE_ONLY = 0x88B9,
+ READ_WRITE = 0x88BA
+ };
+
+ //! create empty buffer
+ Buffer();
+
+ //! create buffer from existed buffer id
+ Buffer(int arows, int acols, int atype, unsigned int abufId, bool autoRelease = false);
+ Buffer(Size asize, int atype, unsigned int abufId, bool autoRelease = false);
+
+ //! create buffer
+ Buffer(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
+ Buffer(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
+
+ //! copy from host/device memory
+ explicit Buffer(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);
+
+ //! create buffer
+ void create(int arows, int acols, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false);
+ void create(Size asize, int atype, Target target = ARRAY_BUFFER, bool autoRelease = false) { create(asize.height, asize.width, atype, target, autoRelease); }
+
+ //! release memory and delete buffer object
+ void release();
+
+ //! set auto release mode (if true, release will be called in object's destructor)
+ void setAutoRelease(bool flag);
+
+ //! copy from host/device memory
+ void copyFrom(InputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false);
+
+ //! copy to host/device memory
+ void copyTo(OutputArray arr, Target target = ARRAY_BUFFER, bool autoRelease = false) const;
+
+ //! create copy of current buffer
+ Buffer clone(Target target = ARRAY_BUFFER, bool autoRelease = false) const;
+
+ //! bind buffer for specified target
+ void bind(Target target) const;
+
+ //! unbind any buffers from specified target
+ static void unbind(Target target);
+
+ //! map to host memory
+ Mat mapHost(Access access);
+ void unmapHost();
+
+ //! map to device memory
+ gpu::GpuMat mapDevice();
+ void unmapDevice();
+
+ int rows() const { return rows_; }
+ int cols() const { return cols_; }
+ Size size() const { return Size(cols_, rows_); }
+ bool empty() const { return rows_ == 0 || cols_ == 0; }
+
+ int type() const { return type_; }
+ int depth() const { return CV_MAT_DEPTH(type_); }
+ int channels() const { return CV_MAT_CN(type_); }
+ int elemSize() const { return CV_ELEM_SIZE(type_); }
+ int elemSize1() const { return CV_ELEM_SIZE1(type_); }
+
+ unsigned int bufId() const;
+
+ class Impl;
+
+private:
+ Ptr<Impl> impl_;
+ int rows_;
+ int cols_;
+ int type_;
+};
+
+//! Smart pointer for OpenGL 2D texture memory with reference counting.
+class CV_EXPORTS Texture2D
+{
+public:
+ enum Format
+ {
+ NONE = 0,
+ DEPTH_COMPONENT = 0x1902, //!< Depth
+ RGB = 0x1907, //!< Red, Green, Blue
+ RGBA = 0x1908 //!< Red, Green, Blue, Alpha
+ };
+
+ //! create empty texture
+ Texture2D();
+
+ //! create texture from existed texture id
+ Texture2D(int arows, int acols, Format aformat, unsigned int atexId, bool autoRelease = false);
+ Texture2D(Size asize, Format aformat, unsigned int atexId, bool autoRelease = false);
+
+ //! create texture
+ Texture2D(int arows, int acols, Format aformat, bool autoRelease = false);
+ Texture2D(Size asize, Format aformat, bool autoRelease = false);
+
+ //! copy from host/device memory
+ explicit Texture2D(InputArray arr, bool autoRelease = false);
+
+ //! create texture
+ void create(int arows, int acols, Format aformat, bool autoRelease = false);
+ void create(Size asize, Format aformat, bool autoRelease = false) { create(asize.height, asize.width, aformat, autoRelease); }
+
+ //! release memory and delete texture object
+ void release();
+
+ //! set auto release mode (if true, release will be called in object's destructor)
+ void setAutoRelease(bool flag);
+
+ //! copy from host/device memory
+ void copyFrom(InputArray arr, bool autoRelease = false);
+
+ //! copy to host/device memory
+ void copyTo(OutputArray arr, int ddepth = CV_32F, bool autoRelease = false) const;
+
+ //! bind texture to current active texture unit for GL_TEXTURE_2D target
+ void bind() const;
+
+ int rows() const { return rows_; }
+ int cols() const { return cols_; }
+ Size size() const { return Size(cols_, rows_); }
+ bool empty() const { return rows_ == 0 || cols_ == 0; }
+
+ Format format() const { return format_; }
+
+ unsigned int texId() const;
+
+ class Impl;
+
+private:
+ Ptr<Impl> impl_;
+ int rows_;
+ int cols_;
+ Format format_;
+};
+
+//! OpenGL Arrays
+class CV_EXPORTS Arrays
+{
+public:
+ Arrays();
+
+ void setVertexArray(InputArray vertex);
+ void resetVertexArray();
+
+ void setColorArray(InputArray color);
+ void resetColorArray();
+
+ void setNormalArray(InputArray normal);
+ void resetNormalArray();
+
+ void setTexCoordArray(InputArray texCoord);
+ void resetTexCoordArray();
+
+ void release();
+
+ void setAutoRelease(bool flag);
+
+ void bind() const;
+
+ int size() const { return size_; }
+ bool empty() const { return size_ == 0; }
+
+private:
+ int size_;
+ Buffer vertex_;
+ Buffer color_;
+ Buffer normal_;
+ Buffer texCoord_;
+};
+
+/////////////////// Render Functions ///////////////////
+
+//! render texture rectangle in window
+CV_EXPORTS void render(const Texture2D& tex,
+ Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0),
+ Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0));
+
+//! render mode
+enum {
+ POINTS = 0x0000,
+ LINES = 0x0001,
+ LINE_LOOP = 0x0002,
+ LINE_STRIP = 0x0003,
+ TRIANGLES = 0x0004,
+ TRIANGLE_STRIP = 0x0005,
+ TRIANGLE_FAN = 0x0006,
+ QUADS = 0x0007,
+ QUAD_STRIP = 0x0008,
+ POLYGON = 0x0009
+};
+
+//! render OpenGL arrays
+CV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scalar::all(255));
+CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255));
+
+}} // namespace cv::gl
+
+namespace cv { namespace gpu {
+
+//! set a CUDA device to use OpenGL interoperability
+CV_EXPORTS void setGlDevice(int device = 0);
+
+}}
+
+namespace cv {
+
+template <> CV_EXPORTS void Ptr<cv::ogl::Buffer::Impl>::delete_obj();
+template <> CV_EXPORTS void Ptr<cv::ogl::Texture2D::Impl>::delete_obj();
+
+}
+
+#endif // __cplusplus
+
+#endif // __OPENCV_OPENGL_INTEROP_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp
new file mode 100644
index 00000000..04e3fc0c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/opengl_interop_deprecated.hpp
@@ -0,0 +1,300 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__
+#define __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__
+
+#ifdef __cplusplus
+
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+//! Smart pointer for OpenGL buffer memory with reference counting.
+class CV_EXPORTS GlBuffer
+{
+public:
+ enum Usage
+ {
+ ARRAY_BUFFER = 0x8892, // buffer will be used for OpenGL arrays (vertices, colors, normals, etc)
+ TEXTURE_BUFFER = 0x88EC // buffer will be used for OpenGL textures
+ };
+
+ //! create empty buffer
+ explicit GlBuffer(Usage usage);
+
+ //! create buffer
+ GlBuffer(int rows, int cols, int type, Usage usage);
+ GlBuffer(Size size, int type, Usage usage);
+
+ //! copy from host/device memory
+ GlBuffer(InputArray mat, Usage usage);
+
+ void create(int rows, int cols, int type, Usage usage);
+ void create(Size size, int type, Usage usage);
+ void create(int rows, int cols, int type);
+ void create(Size size, int type);
+
+ void release();
+
+ //! copy from host/device memory
+ void copyFrom(InputArray mat);
+
+ void bind() const;
+ void unbind() const;
+
+ //! map to host memory
+ Mat mapHost();
+ void unmapHost();
+
+ //! map to device memory
+ gpu::GpuMat mapDevice();
+ void unmapDevice();
+
+ inline int rows() const { return rows_; }
+ inline int cols() const { return cols_; }
+ inline Size size() const { return Size(cols_, rows_); }
+ inline bool empty() const { return rows_ == 0 || cols_ == 0; }
+
+ inline int type() const { return type_; }
+ inline int depth() const { return CV_MAT_DEPTH(type_); }
+ inline int channels() const { return CV_MAT_CN(type_); }
+ inline int elemSize() const { return CV_ELEM_SIZE(type_); }
+ inline int elemSize1() const { return CV_ELEM_SIZE1(type_); }
+
+ inline Usage usage() const { return usage_; }
+
+ class Impl;
+private:
+ int rows_;
+ int cols_;
+ int type_;
+ Usage usage_;
+
+ Ptr<Impl> impl_;
+};
+
+template <> CV_EXPORTS void Ptr<GlBuffer::Impl>::delete_obj();
+
+//! Smart pointer for OpenGL 2d texture memory with reference counting.
+class CV_EXPORTS GlTexture
+{
+public:
+ //! create empty texture
+ GlTexture();
+
+ //! create texture
+ GlTexture(int rows, int cols, int type);
+ GlTexture(Size size, int type);
+
+ //! copy from host/device memory
+ explicit GlTexture(InputArray mat, bool bgra = true);
+
+ void create(int rows, int cols, int type);
+ void create(Size size, int type);
+ void release();
+
+ //! copy from host/device memory
+ void copyFrom(InputArray mat, bool bgra = true);
+
+ void bind() const;
+ void unbind() const;
+
+ inline int rows() const { return rows_; }
+ inline int cols() const { return cols_; }
+ inline Size size() const { return Size(cols_, rows_); }
+ inline bool empty() const { return rows_ == 0 || cols_ == 0; }
+
+ inline int type() const { return type_; }
+ inline int depth() const { return CV_MAT_DEPTH(type_); }
+ inline int channels() const { return CV_MAT_CN(type_); }
+ inline int elemSize() const { return CV_ELEM_SIZE(type_); }
+ inline int elemSize1() const { return CV_ELEM_SIZE1(type_); }
+
+ class Impl;
+private:
+ int rows_;
+ int cols_;
+ int type_;
+
+ Ptr<Impl> impl_;
+ GlBuffer buf_;
+};
+
+template <> CV_EXPORTS void Ptr<GlTexture::Impl>::delete_obj();
+
+//! OpenGL Arrays
+class CV_EXPORTS GlArrays
+{
+public:
+ inline GlArrays()
+ : vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER)
+ {
+ }
+
+ void setVertexArray(InputArray vertex);
+ inline void resetVertexArray() { vertex_.release(); }
+
+ void setColorArray(InputArray color, bool bgra = true);
+ inline void resetColorArray() { color_.release(); }
+
+ void setNormalArray(InputArray normal);
+ inline void resetNormalArray() { normal_.release(); }
+
+ void setTexCoordArray(InputArray texCoord);
+ inline void resetTexCoordArray() { texCoord_.release(); }
+
+ void bind() const;
+ void unbind() const;
+
+ inline int rows() const { return vertex_.rows(); }
+ inline int cols() const { return vertex_.cols(); }
+ inline Size size() const { return vertex_.size(); }
+ inline bool empty() const { return vertex_.empty(); }
+
+private:
+ GlBuffer vertex_;
+ GlBuffer color_;
+ GlBuffer normal_;
+ GlBuffer texCoord_;
+};
+
+//! OpenGL Font
+class CV_EXPORTS GlFont
+{
+public:
+ enum Weight
+ {
+ WEIGHT_LIGHT = 300,
+ WEIGHT_NORMAL = 400,
+ WEIGHT_SEMIBOLD = 600,
+ WEIGHT_BOLD = 700,
+ WEIGHT_BLACK = 900
+ };
+
+ enum Style
+ {
+ STYLE_NORMAL = 0,
+ STYLE_ITALIC = 1,
+ STYLE_UNDERLINE = 2
+ };
+
+ static Ptr<GlFont> get(const std::string& family, int height = 12, Weight weight = WEIGHT_NORMAL, Style style = STYLE_NORMAL);
+
+ void draw(const char* str, int len) const;
+
+ inline const std::string& family() const { return family_; }
+ inline int height() const { return height_; }
+ inline Weight weight() const { return weight_; }
+ inline Style style() const { return style_; }
+
+private:
+ GlFont(const std::string& family, int height, Weight weight, Style style);
+
+ std::string family_;
+ int height_;
+ Weight weight_;
+ Style style_;
+
+ unsigned int base_;
+
+ GlFont(const GlFont&);
+ GlFont& operator =(const GlFont&);
+};
+
+//! render functions
+
+//! render texture rectangle in window
+CV_EXPORTS void render(const GlTexture& tex,
+ Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0),
+ Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0));
+
+//! render mode
+namespace RenderMode {
+ enum {
+ POINTS = 0x0000,
+ LINES = 0x0001,
+ LINE_LOOP = 0x0002,
+ LINE_STRIP = 0x0003,
+ TRIANGLES = 0x0004,
+ TRIANGLE_STRIP = 0x0005,
+ TRIANGLE_FAN = 0x0006,
+ QUADS = 0x0007,
+ QUAD_STRIP = 0x0008,
+ POLYGON = 0x0009
+ };
+}
+
+//! render OpenGL arrays
+CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255));
+
+CV_EXPORTS void render(const std::string& str, const Ptr<GlFont>& font, Scalar color, Point2d pos);
+
+//! OpenGL camera
+class CV_EXPORTS GlCamera
+{
+public:
+ GlCamera();
+
+ void lookAt(Point3d eye, Point3d center, Point3d up);
+ void setCameraPos(Point3d pos, double yaw, double pitch, double roll);
+
+ void setScale(Point3d scale);
+
+ void setProjectionMatrix(const Mat& projectionMatrix, bool transpose = true);
+ void setPerspectiveProjection(double fov, double aspect, double zNear, double zFar);
+ void setOrthoProjection(double left, double right, double bottom, double top, double zNear, double zFar);
+
+ void setupProjectionMatrix() const;
+ void setupModelViewMatrix() const;
+};
+
+inline void GlBuffer::create(Size _size, int _type, Usage _usage) { create(_size.height, _size.width, _type, _usage); }
+inline void GlBuffer::create(int _rows, int _cols, int _type) { create(_rows, _cols, _type, usage()); }
+inline void GlBuffer::create(Size _size, int _type) { create(_size.height, _size.width, _type, usage()); }
+inline void GlTexture::create(Size _size, int _type) { create(_size.height, _size.width, _type); }
+
+} // namespace cv
+
+#endif // __cplusplus
+
+#endif // __OPENCV_OPENGL_INTEROP_DEPRECATED_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp
new file mode 100644
index 00000000..0ae51c69
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/operations.hpp
@@ -0,0 +1,4123 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_OPERATIONS_HPP__
+#define __OPENCV_CORE_OPERATIONS_HPP__
+
+#ifndef SKIP_INCLUDES
+ #include <string.h>
+ #include <limits.h>
+#endif // SKIP_INCLUDES
+
+
+#ifdef __cplusplus
+
+/////// exchange-add operation for atomic operations on reference counters ///////
+#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) // atomic increment on the linux version of the Intel(tm) compiler
+ #define CV_XADD(addr,delta) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
+#elif defined __GNUC__
+
+ #if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__)
+ #ifdef __ATOMIC_SEQ_CST
+ #define CV_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), (delta), __ATOMIC_SEQ_CST)
+ #else
+ #define CV_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), (delta), 5)
+ #endif
+ #elif __GNUC__*10 + __GNUC_MINOR__ >= 42
+
+ #if !(defined WIN32 || defined _WIN32) && (defined __i486__ || defined __i586__ || \
+ defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) || \
+ (defined __GNUC__ && defined _STLPORT_MAJOR) || \
+ defined __EMSCRIPTEN__
+
+ #define CV_XADD __sync_fetch_and_add
+ #else
+ #include <ext/atomicity.h>
+ #define CV_XADD __gnu_cxx::__exchange_and_add
+ #endif
+
+ #else
+ #include <bits/atomicity.h>
+ #if __GNUC__*10 + __GNUC_MINOR__ >= 34
+ #define CV_XADD __gnu_cxx::__exchange_and_add
+ #else
+ #define CV_XADD __exchange_and_add
+ #endif
+ #endif
+
+#elif defined WIN32 || defined _WIN32 || defined WINCE
+ namespace cv { CV_EXPORTS int _interlockedExchangeAdd(int* addr, int delta); }
+ #define CV_XADD cv::_interlockedExchangeAdd
+
+#else
+ static inline int CV_XADD(int* addr, int delta)
+ { int tmp = *addr; *addr += delta; return tmp; }
+#endif
+
+#include <limits>
+
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4127) //conditional expression is constant
+#endif
+
+namespace cv
+{
+
+using std::cos;
+using std::sin;
+using std::max;
+using std::min;
+using std::exp;
+using std::log;
+using std::pow;
+using std::sqrt;
+
+
+/////////////// saturate_cast (used in image & signal processing) ///////////////////
+
+template<typename _Tp> static inline _Tp saturate_cast(uchar v) { return _Tp(v); }
+template<typename _Tp> static inline _Tp saturate_cast(schar v) { return _Tp(v); }
+template<typename _Tp> static inline _Tp saturate_cast(ushort v) { return _Tp(v); }
+template<typename _Tp> static inline _Tp saturate_cast(short v) { return _Tp(v); }
+template<typename _Tp> static inline _Tp saturate_cast(unsigned v) { return _Tp(v); }
+template<typename _Tp> static inline _Tp saturate_cast(int v) { return _Tp(v); }
+template<typename _Tp> static inline _Tp saturate_cast(float v) { return _Tp(v); }
+template<typename _Tp> static inline _Tp saturate_cast(double v) { return _Tp(v); }
+
+template<> inline uchar saturate_cast<uchar>(schar v)
+{ return (uchar)std::max((int)v, 0); }
+template<> inline uchar saturate_cast<uchar>(ushort v)
+{ return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); }
+template<> inline uchar saturate_cast<uchar>(int v)
+{ return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }
+template<> inline uchar saturate_cast<uchar>(short v)
+{ return saturate_cast<uchar>((int)v); }
+template<> inline uchar saturate_cast<uchar>(unsigned v)
+{ return (uchar)std::min(v, (unsigned)UCHAR_MAX); }
+template<> inline uchar saturate_cast<uchar>(float v)
+{ int iv = cvRound(v); return saturate_cast<uchar>(iv); }
+template<> inline uchar saturate_cast<uchar>(double v)
+{ int iv = cvRound(v); return saturate_cast<uchar>(iv); }
+
+template<> inline schar saturate_cast<schar>(uchar v)
+{ return (schar)std::min((int)v, SCHAR_MAX); }
+template<> inline schar saturate_cast<schar>(ushort v)
+{ return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); }
+template<> inline schar saturate_cast<schar>(int v)
+{
+ return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ?
+ v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
+}
+template<> inline schar saturate_cast<schar>(short v)
+{ return saturate_cast<schar>((int)v); }
+template<> inline schar saturate_cast<schar>(unsigned v)
+{ return (schar)std::min(v, (unsigned)SCHAR_MAX); }
+
+template<> inline schar saturate_cast<schar>(float v)
+{ int iv = cvRound(v); return saturate_cast<schar>(iv); }
+template<> inline schar saturate_cast<schar>(double v)
+{ int iv = cvRound(v); return saturate_cast<schar>(iv); }
+
+template<> inline ushort saturate_cast<ushort>(schar v)
+{ return (ushort)std::max((int)v, 0); }
+template<> inline ushort saturate_cast<ushort>(short v)
+{ return (ushort)std::max((int)v, 0); }
+template<> inline ushort saturate_cast<ushort>(int v)
+{ return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }
+template<> inline ushort saturate_cast<ushort>(unsigned v)
+{ return (ushort)std::min(v, (unsigned)USHRT_MAX); }
+template<> inline ushort saturate_cast<ushort>(float v)
+{ int iv = cvRound(v); return saturate_cast<ushort>(iv); }
+template<> inline ushort saturate_cast<ushort>(double v)
+{ int iv = cvRound(v); return saturate_cast<ushort>(iv); }
+
+template<> inline short saturate_cast<short>(ushort v)
+{ return (short)std::min((int)v, SHRT_MAX); }
+template<> inline short saturate_cast<short>(int v)
+{
+ return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ?
+ v : v > 0 ? SHRT_MAX : SHRT_MIN);
+}
+template<> inline short saturate_cast<short>(unsigned v)
+{ return (short)std::min(v, (unsigned)SHRT_MAX); }
+template<> inline short saturate_cast<short>(float v)
+{ int iv = cvRound(v); return saturate_cast<short>(iv); }
+template<> inline short saturate_cast<short>(double v)
+{ int iv = cvRound(v); return saturate_cast<short>(iv); }
+
+template<> inline int saturate_cast<int>(float v) { return cvRound(v); }
+template<> inline int saturate_cast<int>(double v) { return cvRound(v); }
+
+// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.
+template<> inline unsigned saturate_cast<unsigned>(float v){ return cvRound(v); }
+template<> inline unsigned saturate_cast<unsigned>(double v) { return cvRound(v); }
+
+inline int fast_abs(uchar v) { return v; }
+inline int fast_abs(schar v) { return std::abs((int)v); }
+inline int fast_abs(ushort v) { return v; }
+inline int fast_abs(short v) { return std::abs((int)v); }
+inline int fast_abs(int v) { return std::abs(v); }
+inline float fast_abs(float v) { return std::abs(v); }
+inline double fast_abs(double v) { return std::abs(v); }
+
+//////////////////////////////// Matx /////////////////////////////////
+
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx()
+{
+ for(int i = 0; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0)
+{
+ val[0] = v0;
+ for(int i = 1; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1)
+{
+ assert(channels >= 2);
+ val[0] = v0; val[1] = v1;
+ for(int i = 2; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2)
+{
+ assert(channels >= 3);
+ val[0] = v0; val[1] = v1; val[2] = v2;
+ for(int i = 3; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3)
+{
+ assert(channels >= 4);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ for(int i = 4; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4)
+{
+ assert(channels >= 5);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4;
+ for(int i = 5; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5)
+{
+ assert(channels >= 6);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ val[4] = v4; val[5] = v5;
+ for(int i = 6; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6)
+{
+ assert(channels >= 7);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ val[4] = v4; val[5] = v5; val[6] = v6;
+ for(int i = 7; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7)
+{
+ assert(channels >= 8);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
+ for(int i = 8; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8)
+{
+ assert(channels >= 9);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
+ val[8] = v8;
+ for(int i = 9; i < channels; i++) val[i] = _Tp(0);
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8, _Tp v9)
+{
+ assert(channels >= 10);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
+ val[8] = v8; val[9] = v9;
+ for(int i = 10; i < channels; i++) val[i] = _Tp(0);
+}
+
+
+template<typename _Tp, int m, int n>
+inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8, _Tp v9, _Tp v10, _Tp v11)
+{
+ assert(channels == 12);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
+ val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;
+}
+
+template<typename _Tp, int m, int n>
+inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8, _Tp v9, _Tp v10, _Tp v11,
+ _Tp v12, _Tp v13, _Tp v14, _Tp v15)
+{
+ assert(channels == 16);
+ val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
+ val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
+ val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;
+ val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15;
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(const _Tp* values)
+{
+ for( int i = 0; i < channels; i++ ) val[i] = values[i];
+}
+
+template<typename _Tp, int m, int n> inline Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha)
+{
+ Matx<_Tp, m, n> M;
+ for( int i = 0; i < m*n; i++ ) M.val[i] = alpha;
+ return M;
+}
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros()
+{
+ return all(0);
+}
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n> Matx<_Tp,m,n>::ones()
+{
+ return all(1);
+}
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n> Matx<_Tp,m,n>::eye()
+{
+ Matx<_Tp,m,n> M;
+ for(int i = 0; i < MIN(m,n); i++)
+ M(i,i) = 1;
+ return M;
+}
+
+template<typename _Tp, int m, int n> inline _Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const
+{
+ _Tp s = 0;
+ for( int i = 0; i < m*n; i++ ) s += val[i]*M.val[i];
+ return s;
+}
+
+
+template<typename _Tp, int m, int n> inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const
+{
+ double s = 0;
+ for( int i = 0; i < m*n; i++ ) s += (double)val[i]*M.val[i];
+ return s;
+}
+
+
+/** @cond IGNORED */
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const typename Matx<_Tp,m,n>::diag_type& d)
+{
+ Matx<_Tp,m,n> M;
+ for(int i = 0; i < MIN(m,n); i++)
+ M(i,i) = d(i, 0);
+ return M;
+}
+/** @endcond */
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b)
+{
+ Matx<_Tp,m,n> M;
+ Mat matM(M, false);
+ cv::randu(matM, Scalar(a), Scalar(b));
+ return M;
+}
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b)
+{
+ Matx<_Tp,m,n> M;
+ Mat matM(M, false);
+ cv::randn(matM, Scalar(a), Scalar(b));
+ return M;
+}
+
+template<typename _Tp, int m, int n> template<typename T2>
+inline Matx<_Tp, m, n>::operator Matx<T2, m, n>() const
+{
+ Matx<T2, m, n> M;
+ for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast<T2>(val[i]);
+ return M;
+}
+
+
+template<typename _Tp, int m, int n> template<int m1, int n1> inline
+Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const
+{
+ CV_DbgAssert(m1*n1 == m*n);
+ return (const Matx<_Tp, m1, n1>&)*this;
+}
+
+
+template<typename _Tp, int m, int n>
+template<int m1, int n1> inline
+Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const
+{
+ CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n);
+ Matx<_Tp, m1, n1> s;
+ for( int di = 0; di < m1; di++ )
+ for( int dj = 0; dj < n1; dj++ )
+ s(di, dj) = (*this)(i+di, j+dj);
+ return s;
+}
+
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const
+{
+ CV_DbgAssert((unsigned)i < (unsigned)m);
+ return Matx<_Tp, 1, n>(&val[i*n]);
+}
+
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const
+{
+ CV_DbgAssert((unsigned)j < (unsigned)n);
+ Matx<_Tp, m, 1> v;
+ for( int i = 0; i < m; i++ )
+ v.val[i] = val[i*n + j];
+ return v;
+}
+
+
+template<typename _Tp, int m, int n> inline
+typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const
+{
+ diag_type d;
+ for( int i = 0; i < MIN(m, n); i++ )
+ d.val[i] = val[i*n + i];
+ return d;
+}
+
+
+template<typename _Tp, int m, int n> inline
+const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const
+{
+ CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n );
+ return this->val[i*n + j];
+}
+
+
+template<typename _Tp, int m, int n> inline
+_Tp& Matx<_Tp, m, n>::operator ()(int i, int j)
+{
+ CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n );
+ return val[i*n + j];
+}
+
+
+template<typename _Tp, int m, int n> inline
+const _Tp& Matx<_Tp, m, n>::operator ()(int i) const
+{
+ CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) );
+ return val[i];
+}
+
+
+template<typename _Tp, int m, int n> inline
+_Tp& Matx<_Tp, m, n>::operator ()(int i)
+{
+ CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) );
+ return val[i];
+}
+
+
+template<typename _Tp1, typename _Tp2, int m, int n> static inline
+Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)
+{
+ for( int i = 0; i < m*n; i++ )
+ a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);
+ return a;
+}
+
+
+template<typename _Tp1, typename _Tp2, int m, int n> static inline
+Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)
+{
+ for( int i = 0; i < m*n; i++ )
+ a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);
+ return a;
+}
+
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp)
+{
+ for( int i = 0; i < m*n; i++ )
+ val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]);
+}
+
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp)
+{
+ for( int i = 0; i < m*n; i++ )
+ val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]);
+}
+
+
+template<typename _Tp, int m, int n> template<typename _T2> inline
+Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp)
+{
+ for( int i = 0; i < m*n; i++ )
+ val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
+}
+
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp)
+{
+ for( int i = 0; i < m*n; i++ )
+ val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]);
+}
+
+
+template<typename _Tp, int m, int n> template<int l> inline
+Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp)
+{
+ for( int i = 0; i < m; i++ )
+ for( int j = 0; j < n; j++ )
+ {
+ _Tp s = 0;
+ for( int k = 0; k < l; k++ )
+ s += a(i, k) * b(k, j);
+ val[i*n + j] = s;
+ }
+}
+
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp)
+{
+ for( int i = 0; i < m; i++ )
+ for( int j = 0; j < n; j++ )
+ val[i*n + j] = a(j, i);
+}
+
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
+{
+ return Matx<_Tp, m, n>(a, b, Matx_AddOp());
+}
+
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
+{
+ return Matx<_Tp, m, n>(a, b, Matx_SubOp());
+}
+
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha)
+{
+ for( int i = 0; i < m*n; i++ )
+ a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
+ return a;
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha)
+{
+ for( int i = 0; i < m*n; i++ )
+ a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
+ return a;
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha)
+{
+ for( int i = 0; i < m*n; i++ )
+ a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
+ return a;
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha)
+{
+ return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha)
+{
+ return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha)
+{
+ return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a)
+{
+ return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a)
+{
+ return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a)
+{
+ return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
+}
+
+// --- Matx arithmetic: negation, matrix products, and element-wise multiply ---
+// All operators delegate to the tag-dispatched Matx constructors
+// (Matx_ScaleOp / Matx_MatMulOp / Matx_MulOp) defined earlier in this header.
+template<typename _Tp, int m, int n> static inline
+Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a)
+{
+ return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp())
+}
+
+
+// (m x l) * (l x n) -> (m x n) matrix product.
+template<typename _Tp, int m, int n, int l> static inline
+Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b)
+{
+ return Matx<_Tp, m, n>(a, b, Matx_MatMulOp());
+}
+
+
+// Matrix-vector product; the (m x 1) result is re-viewed as a Vec.
+// NOTE(review): the reinterpret_cast relies on Vec<_Tp,m> being layout-identical
+// to Matx<_Tp,m,1> (Vec derives from it and adds no data members).
+template<typename _Tp, int m, int n> static inline
+Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b)
+{
+ Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp());
+ return reinterpret_cast<const Vec<_Tp, m>&>(c);
+}
+
+
+// 2x2 matrix applied to a 2D point.
+template<typename _Tp> static inline
+Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b)
+{
+ Matx<_Tp, 2, 1> tmp = a*Vec<_Tp,2>(b.x, b.y);
+ return Point_<_Tp>(tmp.val[0], tmp.val[1]);
+}
+
+
+// 3x3 matrix applied to a 3D point.
+template<typename _Tp> static inline
+Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b)
+{
+ Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, b.z);
+ return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);
+}
+
+
+// 3x3 matrix applied to a 2D point in homogeneous form (z = 1).
+template<typename _Tp> static inline
+Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b)
+{
+ Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, 1);
+ return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);
+}
+
+
+// 4x4 matrix applied to a 3D point in homogeneous form (w = 1).
+template<typename _Tp> static inline
+Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b)
+{
+ return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1);
+}
+
+
+// 4x4 matrix applied to a Scalar (4-vector of double); the matrix is
+// converted to double first. The static_cast is a reference downcast:
+// Scalar derives from Vec<double,4>, which derives from Matx<double,4,1>.
+template<typename _Tp> static inline
+Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b)
+{
+ Matx<double, 4, 1> c(Matx<double, 4, 4>(a), b, Matx_MatMulOp());
+ return static_cast<const Scalar&>(c);
+}
+
+
+// Overload avoiding the element-type conversion when the matrix is already double.
+static inline
+Scalar operator * (const Matx<double, 4, 4>& a, const Scalar& b)
+{
+ Matx<double, 4, 1> c(a, b, Matx_MatMulOp());
+ return static_cast<const Scalar&>(c);
+}
+
+
+// Element-wise (Hadamard) product, as opposed to operator* above.
+template<typename _Tp, int m, int n> inline
+Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const
+{
+ return Matx<_Tp, m, n>(*this, a, Matx_MulOp());
+}
+
+// Low-level decomposition kernels implemented in the core library:
+// in-place LU (returns the permutation sign, 0 if singular) and Cholesky.
+CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);
+CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);
+CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);
+CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);
+
+
+// Generic determinant functor: LU-decompose a copy, then combine the sign
+// with the diagonal of the decomposed matrix.
+// NOTE(review): the final `1./p` implies the in-place LU stores reciprocal
+// pivots on the diagonal -- confirm against the LU() implementation.
+template<typename _Tp, int m> struct Matx_DetOp
+{
+ double operator ()(const Matx<_Tp, m, m>& a) const
+ {
+ Matx<_Tp, m, m> temp = a;
+ double p = LU(temp.val, m*sizeof(_Tp), m, 0, 0, 0);
+ if( p == 0 )
+ return p;
+ for( int i = 0; i < m; i++ )
+ p *= temp(i, i);
+ return 1./p;
+ }
+};
+
+
+// Closed-form specializations for the small sizes (no decomposition needed).
+template<typename _Tp> struct Matx_DetOp<_Tp, 1>
+{
+ double operator ()(const Matx<_Tp, 1, 1>& a) const
+ {
+ return a(0,0);
+ }
+};
+
+
+template<typename _Tp> struct Matx_DetOp<_Tp, 2>
+{
+ double operator ()(const Matx<_Tp, 2, 2>& a) const
+ {
+ return a(0,0)*a(1,1) - a(0,1)*a(1,0);
+ }
+};
+
+
+template<typename _Tp> struct Matx_DetOp<_Tp, 3>
+{
+ double operator ()(const Matx<_Tp, 3, 3>& a) const
+ {
+ return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) -
+ a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) +
+ a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1));
+ }
+};
+
+// Public entry point: dispatches to the size-specialized functor above.
+template<typename _Tp, int m> static inline
+double determinant(const Matx<_Tp, m, m>& a)
+{
+ return Matx_DetOp<_Tp, m>()(a);
+}
+
+
+// Sum of the main-diagonal elements; for non-square matrices the diagonal
+// runs to min(m, n).
+template<typename _Tp, int m, int n> static inline
+double trace(const Matx<_Tp, m, n>& a)
+{
+ _Tp s = 0;
+ for( int i = 0; i < std::min(m, n); i++ )
+ s += a(i,i);
+ return s;
+}
+
+
+// Transpose, delegating to the Matx_TOp tag constructor.
+template<typename _Tp, int m, int n> inline
+Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const
+{
+ return Matx<_Tp, n, m>(*this, Matx_TOp());
+}
+
+
+// Fast fixed-size inversion functor. The generic version solves A*B = I via
+// LU or Cholesky on a scratch copy; 2x2 and 3x3 use the adjugate formula.
+template<typename _Tp, int m> struct Matx_FastInvOp
+{
+ bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const
+ {
+ Matx<_Tp, m, m> temp = a;
+
+ // assume that b is all 0's on input => make it a unity matrix
+ for( int i = 0; i < m; i++ )
+ b(i, i) = (_Tp)1;
+
+ if( method == DECOMP_CHOLESKY )
+ return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m);
+
+ return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0;
+ }
+};
+
+
+// 2x2: inverse via adjugate / determinant; fails (returns false) if singular.
+template<typename _Tp> struct Matx_FastInvOp<_Tp, 2>
+{
+ bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const
+ {
+ _Tp d = determinant(a);
+ if( d == 0 )
+ return false;
+ d = 1/d;
+ b(1,1) = a(0,0)*d;
+ b(0,0) = a(1,1)*d;
+ b(0,1) = -a(0,1)*d;
+ b(1,0) = -a(1,0)*d;
+ return true;
+ }
+};
+
+
+// 3x3: inverse via cofactor matrix / determinant.
+template<typename _Tp> struct Matx_FastInvOp<_Tp, 3>
+{
+ bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const
+ {
+ _Tp d = (_Tp)determinant(a);
+ if( d == 0 )
+ return false;
+ d = 1/d;
+ b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d;
+ b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d;
+ b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d;
+
+ b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d;
+ b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d;
+ b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d;
+
+ b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d;
+ b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d;
+ b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d;
+ return true;
+ }
+};
+
+
+// Matrix inverse: fast path for LU/Cholesky, otherwise fall back to the
+// general cv::invert on Mat headers wrapping this data (no copy: `false`).
+// Returns a zero matrix when inversion fails.
+template<typename _Tp, int m, int n> inline
+Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const
+{
+ Matx<_Tp, n, m> b;
+ bool ok;
+ if( method == DECOMP_LU || method == DECOMP_CHOLESKY )
+ ok = Matx_FastInvOp<_Tp, m>()(*this, b, method);
+ else
+ {
+ Mat A(*this, false), B(b, false);
+ ok = (invert(A, B, method) != 0);
+ }
+ return ok ? b : Matx<_Tp, n, m>::zeros();
+}
+
+
+// Fast fixed-size linear solver A*x = b. Generic version decomposes a copy of
+// A in place and overwrites x (initialized with b) with the solution.
+template<typename _Tp, int m, int n> struct Matx_FastSolveOp
+{
+ bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b,
+ Matx<_Tp, m, n>& x, int method) const
+ {
+ Matx<_Tp, m, m> temp = a;
+ x = b;
+ if( method == DECOMP_CHOLESKY )
+ return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n);
+
+ return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0;
+ }
+};
+
+
+// 2x2 single-RHS: Cramer's rule.
+template<typename _Tp> struct Matx_FastSolveOp<_Tp, 2, 1>
+{
+ bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b,
+ Matx<_Tp, 2, 1>& x, int) const
+ {
+ _Tp d = determinant(a);
+ if( d == 0 )
+ return false;
+ d = 1/d;
+ x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d;
+ x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d;
+ return true;
+ }
+};
+
+
+// 3x3 single-RHS: Cramer's rule with expanded 3x3 determinants.
+template<typename _Tp> struct Matx_FastSolveOp<_Tp, 3, 1>
+{
+ bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b,
+ Matx<_Tp, 3, 1>& x, int) const
+ {
+ _Tp d = (_Tp)determinant(a);
+ if( d == 0 )
+ return false;
+ d = 1/d;
+ x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) -
+ a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) +
+ a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2)));
+
+ x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) -
+ b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) +
+ a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0)));
+
+ x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) -
+ a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) +
+ b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0)));
+ return true;
+ }
+};
+
+
+// Solve this * x = rhs; fast path for LU/Cholesky, otherwise cv::solve on
+// wrapping Mat headers. Returns a zero matrix on failure.
+template<typename _Tp, int m, int n> template<int l> inline
+Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const
+{
+ Matx<_Tp, n, l> x;
+ bool ok;
+ if( method == DECOMP_LU || method == DECOMP_CHOLESKY )
+ ok = Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method);
+ else
+ {
+ Mat A(*this, false), B(rhs, false), X(x, false);
+ ok = cv::solve(A, B, X, method);
+ }
+
+ return ok ? x : Matx<_Tp, n, l>::zeros();
+}
+
+// Vec convenience overload: views the Vec RHS/result as (m x 1)/(n x 1) Matx.
+template<typename _Tp, int m, int n> inline
+Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const
+{
+ Matx<_Tp, n, 1> x = solve(reinterpret_cast<const Matx<_Tp, m, 1>&>(rhs), method);
+ return reinterpret_cast<Vec<_Tp, n>&>(x);
+}
+
+// --- Raw-array norm kernels. _AccTp is the accumulator type; the 4-way
+// manual unrolling is compiled in only when CV_ENABLE_UNROLLED is set. ---
+
+// Sum of squares of a[0..n).
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL2Sqr(const _Tp* a, int n)
+{
+ _AccTp s = 0;
+ int i=0;
+ #if CV_ENABLE_UNROLLED
+ for( ; i <= n - 4; i += 4 )
+ {
+ _AccTp v0 = a[i], v1 = a[i+1], v2 = a[i+2], v3 = a[i+3];
+ s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
+ }
+#endif
+ for( ; i < n; i++ )
+ {
+ _AccTp v = a[i];
+ s += v*v;
+ }
+ return s;
+}
+
+
+// Sum of absolute values of a[0..n).
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL1(const _Tp* a, int n)
+{
+ _AccTp s = 0;
+ int i = 0;
+#if CV_ENABLE_UNROLLED
+ for(; i <= n - 4; i += 4 )
+ {
+ s += (_AccTp)fast_abs(a[i]) + (_AccTp)fast_abs(a[i+1]) +
+ (_AccTp)fast_abs(a[i+2]) + (_AccTp)fast_abs(a[i+3]);
+ }
+#endif
+ for( ; i < n; i++ )
+ s += fast_abs(a[i]);
+ return s;
+}
+
+
+// Maximum absolute value of a[0..n).
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normInf(const _Tp* a, int n)
+{
+ _AccTp s = 0;
+ for( int i = 0; i < n; i++ )
+ s = std::max(s, (_AccTp)fast_abs(a[i]));
+ return s;
+}
+
+
+// Sum of squared differences of a[0..n) and b[0..n).
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n)
+{
+ _AccTp s = 0;
+ int i= 0;
+#if CV_ENABLE_UNROLLED
+ for(; i <= n - 4; i += 4 )
+ {
+ _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
+ s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
+ }
+#endif
+ for( ; i < n; i++ )
+ {
+ _AccTp v = _AccTp(a[i] - b[i]);
+ s += v*v;
+ }
+ return s;
+}
+
+// Optimized out-of-line kernels from the core library (possibly SIMD),
+// used by the float/uchar specializations below for larger n.
+CV_EXPORTS float normL2Sqr_(const float* a, const float* b, int n);
+CV_EXPORTS float normL1_(const float* a, const float* b, int n);
+CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n);
+CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n);
+CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize);
+
+// float specialization: defer to the library kernel once n >= 8.
+template<> inline float normL2Sqr(const float* a, const float* b, int n)
+{
+ if( n >= 8 )
+ return normL2Sqr_(a, b, n);
+ float s = 0;
+ for( int i = 0; i < n; i++ )
+ {
+ float v = a[i] - b[i];
+ s += v*v;
+ }
+ return s;
+}
+
+
+// Sum of absolute differences of a[0..n) and b[0..n).
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normL1(const _Tp* a, const _Tp* b, int n)
+{
+ _AccTp s = 0;
+ int i= 0;
+#if CV_ENABLE_UNROLLED
+ for(; i <= n - 4; i += 4 )
+ {
+ _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
+ s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3);
+ }
+#endif
+ for( ; i < n; i++ )
+ {
+ _AccTp v = _AccTp(a[i] - b[i]);
+ s += std::abs(v);
+ }
+ return s;
+}
+
+template<> inline float normL1(const float* a, const float* b, int n)
+{
+ if( n >= 8 )
+ return normL1_(a, b, n);
+ float s = 0;
+ for( int i = 0; i < n; i++ )
+ {
+ float v = a[i] - b[i];
+ s += std::abs(v);
+ }
+ return s;
+}
+
+template<> inline int normL1(const uchar* a, const uchar* b, int n)
+{
+ return normL1_(a, b, n);
+}
+
+// Maximum absolute difference of a[0..n) and b[0..n).
+template<typename _Tp, typename _AccTp> static inline
+_AccTp normInf(const _Tp* a, const _Tp* b, int n)
+{
+ _AccTp s = 0;
+ for( int i = 0; i < n; i++ )
+ {
+ _AccTp v0 = a[i] - b[i];
+ s = std::max(s, std::abs(v0));
+ }
+ return s;
+}
+
+
+// Frobenius (L2) norm of a Matx, accumulated in double.
+template<typename _Tp, int m, int n> static inline
+double norm(const Matx<_Tp, m, n>& M)
+{
+ return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n));
+}
+
+
+// Norm of a Matx with selectable type (NORM_INF / NORM_L1 / default L2),
+// accumulating in the element type's work_type.
+template<typename _Tp, int m, int n> static inline
+double norm(const Matx<_Tp, m, n>& M, int normType)
+{
+ return normType == NORM_INF ? (double)normInf<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) :
+ normType == NORM_L1 ? (double)normL1<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n) :
+ std::sqrt((double)normL2Sqr<_Tp, typename DataType<_Tp>::work_type>(M.val, m*n));
+}
+
+
+// Exact element-wise equality of two matrices.
+template<typename _Tp, int m, int n> static inline
+bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
+{
+ for( int i = 0; i < m*n; i++ )
+ if( a.val[i] != b.val[i] ) return false;
+ return true;
+}
+
+template<typename _Tp, int m, int n> static inline
+bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
+{
+ return !(a == b);
+}
+
+
+// Comma-initializer support: `m << v0, v1, ...` fills the matrix in row-major
+// order; dereferencing the initializer asserts all m*n slots were written.
+template<typename _Tp, typename _T2, int m, int n> static inline
+MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val)
+{
+ MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx);
+ return (commaInitializer, val);
+}
+
+template<typename _Tp, int m, int n> inline
+MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* _mtx)
+ : dst(_mtx), idx(0)
+{}
+
+template<typename _Tp, int m, int n> template<typename _T2> inline
+MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value)
+{
+ CV_DbgAssert( idx < m*n );
+ dst->val[idx++] = saturate_cast<_Tp>(value);
+ return *this;
+}
+
+template<typename _Tp, int m, int n> inline
+Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const
+{
+ CV_DbgAssert( idx == n*m );
+ return *dst;
+}
+
+/////////////////////////// short vector (Vec) /////////////////////////////
+
+// Vec<_Tp, cn> is a column vector: all constructors forward to the
+// Matx<_Tp, cn, 1> base. The default constructor leaves elements
+// uninitialized (matching the Matx base behavior).
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec()
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0)
+ : Matx<_Tp, cn, 1>(v0)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1)
+ : Matx<_Tp, cn, 1>(v0, v1)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2)
+ : Matx<_Tp, cn, 1>(v0, v1, v2)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3)
+ : Matx<_Tp, cn, 1>(v0, v1, v2, v3)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4)
+ : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5)
+ : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6)
+ : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7)
+ : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8)
+ : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
+ _Tp v4, _Tp v5, _Tp v6, _Tp v7,
+ _Tp v8, _Tp v9)
+ : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9)
+{}
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(const _Tp* values)
+ : Matx<_Tp, cn, 1>(values)
+{}
+
+
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m)
+ : Matx<_Tp, cn, 1>(m.val)
+{}
+
+// Tag-dispatched constructors mirroring the Matx operation tags.
+template<typename _Tp, int cn> inline
+Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op)
+: Matx<_Tp, cn, 1>(a, b, op)
+{}
+
+template<typename _Tp, int cn> inline
+Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op)
+: Matx<_Tp, cn, 1>(a, b, op)
+{}
+
+template<typename _Tp, int cn> template<typename _T2> inline
+Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op)
+: Matx<_Tp, cn, 1>(a, alpha, op)
+{}
+
+// Vector with every element set to alpha.
+template<typename _Tp, int cn> inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha)
+{
+ Vec v;
+ for( int i = 0; i < cn; i++ ) v.val[i] = alpha;
+ return v;
+}
+
+// Element-wise (saturating) product.
+template<typename _Tp, int cn> inline Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const
+{
+ Vec<_Tp, cn> w;
+ for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]);
+ return w;
+}
+
+// Conjugation for complex-like 2-vectors and quaternion-like 4-vectors:
+// negate every component except the first.
+template<typename _Tp> Vec<_Tp, 2> conjugate(const Vec<_Tp, 2>& v)
+{
+ return Vec<_Tp, 2>(v[0], -v[1]);
+}
+
+template<typename _Tp> Vec<_Tp, 4> conjugate(const Vec<_Tp, 4>& v)
+{
+ return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]);
+}
+
+// conj() is only provided for float/double 2- and 4-vectors.
+template<> inline Vec<float, 2> Vec<float, 2>::conj() const
+{
+ return conjugate(*this);
+}
+
+template<> inline Vec<double, 2> Vec<double, 2>::conj() const
+{
+ return conjugate(*this);
+}
+
+template<> inline Vec<float, 4> Vec<float, 4>::conj() const
+{
+ return conjugate(*this);
+}
+
+template<> inline Vec<double, 4> Vec<double, 4>::conj() const
+{
+ return conjugate(*this);
+}
+
+// Generic cross() deliberately errors out: only the 3-element
+// specializations further below define a cross product.
+template<typename _Tp, int cn> inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>&) const
+{
+ CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined");
+ return Vec<_Tp, cn>();
+}
+
+// Element-type conversion with saturation.
+template<typename _Tp, int cn> template<typename T2>
+inline Vec<_Tp, cn>::operator Vec<T2, cn>() const
+{
+ Vec<T2, cn> v;
+ for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast<T2>(this->val[i]);
+ return v;
+}
+
+// Conversion to the legacy C CvScalar: copies up to 4 elements, zero-pads.
+template<typename _Tp, int cn> inline Vec<_Tp, cn>::operator CvScalar() const
+{
+ CvScalar s = {{0,0,0,0}};
+ int i;
+ for( i = 0; i < std::min(cn, 4); i++ ) s.val[i] = this->val[i];
+ for( ; i < 4; i++ ) s.val[i] = 0;
+ return s;
+}
+
+// Bounds-checked (debug builds only) element access; [] and () are synonyms.
+template<typename _Tp, int cn> inline const _Tp& Vec<_Tp, cn>::operator [](int i) const
+{
+ CV_DbgAssert( (unsigned)i < (unsigned)cn );
+ return this->val[i];
+}
+
+template<typename _Tp, int cn> inline _Tp& Vec<_Tp, cn>::operator [](int i)
+{
+ CV_DbgAssert( (unsigned)i < (unsigned)cn );
+ return this->val[i];
+}
+
+template<typename _Tp, int cn> inline const _Tp& Vec<_Tp, cn>::operator ()(int i) const
+{
+ CV_DbgAssert( (unsigned)i < (unsigned)cn );
+ return this->val[i];
+}
+
+template<typename _Tp, int cn> inline _Tp& Vec<_Tp, cn>::operator ()(int i)
+{
+ CV_DbgAssert( (unsigned)i < (unsigned)cn );
+ return this->val[i];
+}
+
+// --- Vec arithmetic operators. All element writes go through saturate_cast,
+// so integer Vec types clamp rather than wrap on overflow. ---
+template<typename _Tp1, typename _Tp2, int cn> static inline Vec<_Tp1, cn>&
+operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)
+{
+ for( int i = 0; i < cn; i++ )
+ a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);
+ return a;
+}
+
+template<typename _Tp1, typename _Tp2, int cn> static inline Vec<_Tp1, cn>&
+operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)
+{
+ for( int i = 0; i < cn; i++ )
+ a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);
+ return a;
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b)
+{
+ return Vec<_Tp, cn>(a, b, Matx_AddOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b)
+{
+ return Vec<_Tp, cn>(a, b, Matx_SubOp());
+}
+
+// Scalar compound assignment, overloaded per scalar type (int/float/double)
+// to control the promotion applied before saturate_cast.
+template<typename _Tp, int cn> static inline
+Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha)
+{
+ for( int i = 0; i < cn; i++ )
+ a[i] = saturate_cast<_Tp>(a[i]*alpha);
+ return a;
+}
+
+template<typename _Tp, int cn> static inline
+Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha)
+{
+ for( int i = 0; i < cn; i++ )
+ a[i] = saturate_cast<_Tp>(a[i]*alpha);
+ return a;
+}
+
+template<typename _Tp, int cn> static inline
+Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha)
+{
+ for( int i = 0; i < cn; i++ )
+ a[i] = saturate_cast<_Tp>(a[i]*alpha);
+ return a;
+}
+
+// Division is implemented as multiplication by the reciprocal.
+template<typename _Tp, int cn> static inline
+Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, int alpha)
+{
+ double ialpha = 1./alpha;
+ for( int i = 0; i < cn; i++ )
+ a[i] = saturate_cast<_Tp>(a[i]*ialpha);
+ return a;
+}
+
+template<typename _Tp, int cn> static inline
+Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha)
+{
+ float ialpha = 1.f/alpha;
+ for( int i = 0; i < cn; i++ )
+ a[i] = saturate_cast<_Tp>(a[i]*ialpha);
+ return a;
+}
+
+template<typename _Tp, int cn> static inline
+Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, double alpha)
+{
+ double ialpha = 1./alpha;
+ for( int i = 0; i < cn; i++ )
+ a[i] = saturate_cast<_Tp>(a[i]*ialpha);
+ return a;
+}
+
+// Binary scalar multiply/divide, both operand orders.
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (const Vec<_Tp, cn>& a, int alpha)
+{
+ return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (int alpha, const Vec<_Tp, cn>& a)
+{
+ return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (const Vec<_Tp, cn>& a, float alpha)
+{
+ return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (float alpha, const Vec<_Tp, cn>& a)
+{
+ return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (const Vec<_Tp, cn>& a, double alpha)
+{
+ return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator * (double alpha, const Vec<_Tp, cn>& a)
+{
+ return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator / (const Vec<_Tp, cn>& a, int alpha)
+{
+ return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator / (const Vec<_Tp, cn>& a, float alpha)
+{
+ return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator / (const Vec<_Tp, cn>& a, double alpha)
+{
+ return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());
+}
+
+template<typename _Tp, int cn> static inline Vec<_Tp, cn>
+operator - (const Vec<_Tp, cn>& a)
+{
+ Vec<_Tp,cn> t;
+ for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]);
+ return t;
+}
+
+// Hamilton (quaternion) product of two 4-vectors, component order (w, x, y, z)
+// by the sign pattern used here.
+template<typename _Tp> inline Vec<_Tp, 4> operator * (const Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)
+{
+ return Vec<_Tp, 4>(saturate_cast<_Tp>(v1[0]*v2[0] - v1[1]*v2[1] - v1[2]*v2[2] - v1[3]*v2[3]),
+ saturate_cast<_Tp>(v1[0]*v2[1] + v1[1]*v2[0] + v1[2]*v2[3] - v1[3]*v2[2]),
+ saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]),
+ saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0]));
+}
+
+template<typename _Tp> inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)
+{
+ v1 = v1 * v2;
+ return v1;
+}
+
+// 3D cross product, only for float/double 3-vectors (the generic cross()
+// above raises an error for every other size/type).
+template<> inline Vec<float, 3> Vec<float, 3>::cross(const Vec<float, 3>& v) const
+{
+ return Vec<float,3>(val[1]*v.val[2] - val[2]*v.val[1],
+ val[2]*v.val[0] - val[0]*v.val[2],
+ val[0]*v.val[1] - val[1]*v.val[0]);
+}
+
+template<> inline Vec<double, 3> Vec<double, 3>::cross(const Vec<double, 3>& v) const
+{
+ return Vec<double,3>(val[1]*v.val[2] - val[2]*v.val[1],
+ val[2]*v.val[0] - val[0]*v.val[2],
+ val[0]*v.val[1] - val[1]*v.val[0]);
+}
+
+// Unit-length copy of v; the zero vector maps to the zero vector
+// (guarded against division by a zero norm).
+template<typename _Tp, int cn> inline Vec<_Tp, cn> normalize(const Vec<_Tp, cn>& v)
+{
+ double nv = norm(v);
+ return v * (nv ? 1./nv : 0.);
+}
+
+// Comma-initializer for Vec, reusing the Matx initializer with n = 1.
+template<typename _Tp, typename _T2, int cn> static inline
+VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val)
+{
+ VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec);
+ return (commaInitializer, val);
+}
+
+template<typename _Tp, int cn> inline
+VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec)
+ : MatxCommaInitializer<_Tp, cn, 1>(_vec)
+{}
+
+template<typename _Tp, int cn> template<typename _T2> inline
+VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value)
+{
+ CV_DbgAssert( this->idx < cn );
+ this->dst->val[this->idx++] = saturate_cast<_Tp>(value);
+ return *this;
+}
+
+template<typename _Tp, int cn> inline
+Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const
+{
+ CV_DbgAssert( this->idx == cn );
+ return *this->dst;
+}
+
+//////////////////////////////// Complex //////////////////////////////
+
+// Lightweight complex number (re, im) with the usual field operations.
+// Arithmetic is exact in the element type -- no saturation here.
+template<typename _Tp> inline Complex<_Tp>::Complex() : re(0), im(0) {}
+template<typename _Tp> inline Complex<_Tp>::Complex( _Tp _re, _Tp _im ) : re(_re), im(_im) {}
+template<typename _Tp> template<typename T2> inline Complex<_Tp>::operator Complex<T2>() const
+{ return Complex<T2>(saturate_cast<T2>(re), saturate_cast<T2>(im)); }
+template<typename _Tp> inline Complex<_Tp> Complex<_Tp>::conj() const
+{ return Complex<_Tp>(re, -im); }
+
+template<typename _Tp> static inline
+bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b)
+{ return a.re == b.re && a.im == b.im; }
+
+template<typename _Tp> static inline
+bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b)
+{ return a.re != b.re || a.im != b.im; }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b)
+{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); }
+
+template<typename _Tp> static inline
+Complex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b)
+{ a.re += b.re; a.im += b.im; return a; }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b)
+{ return Complex<_Tp>( a.re - b.re, a.im - b.im ); }
+
+template<typename _Tp> static inline
+Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b)
+{ a.re -= b.re; a.im -= b.im; return a; }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator - (const Complex<_Tp>& a)
+{ return Complex<_Tp>(-a.re, -a.im); }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b)
+{ return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b)
+{ return Complex<_Tp>( a.re*b, a.im*b ); }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a)
+{ return Complex<_Tp>( a.re*b, a.im*b ); }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b)
+{ return Complex<_Tp>( a.re + b, a.im ); }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator - (const Complex<_Tp>& a, _Tp b)
+{ return Complex<_Tp>( a.re - b, a.im ); }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a)
+{ return Complex<_Tp>( a.re + b, a.im ); }
+
+template<typename _Tp> static inline
+Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a)
+{ return Complex<_Tp>( b - a.re, -a.im ); }
+
+template<typename _Tp> static inline
+Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b)
+{ a.re += b; return a; }
+
+template<typename _Tp> static inline
+Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b)
+{ a.re -= b; return a; }
+
+template<typename _Tp> static inline
+Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b)
+{ a.re *= b; a.im *= b; return a; }
+
+// Modulus |a|, always computed in double.
+template<typename _Tp> static inline
+double abs(const Complex<_Tp>& a)
+{ return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); }
+
+// Complex division: multiply by the conjugate of b over |b|^2.
+template<typename _Tp> static inline
+Complex<_Tp> operator / (const Complex<_Tp>& a, const Complex<_Tp>& b)
+{
+ double t = 1./((double)b.re*b.re + (double)b.im*b.im);
+ return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t),
+ (_Tp)((-a.re*b.im + a.im*b.re)*t) );
+}
+
+template<typename _Tp> static inline
+Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b)
+{
+ return (a = a / b);
+}
+
+template<typename _Tp> static inline
+Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b)
+{
+ _Tp t = (_Tp)1/b;
+ return Complex<_Tp>( a.re*t, a.im*t );
+}
+
+template<typename _Tp> static inline
+Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a)
+{
+ return Complex<_Tp>(b)/a;
+}
+// Scale a complex number in place by 1/b and return the updated operand.
+// BUG FIX: the original declared the parameter `const Complex<_Tp>&` and
+// returned by value, yet assigned to a.re/a.im -- any instantiation fails to
+// compile (assignment through a const reference). Take the operand by mutable
+// reference and return it by reference, matching the other compound-assignment
+// operators in this section and the upstream OpenCV correction.
+template<typename _Tp> static inline
+Complex<_Tp>& operator /= (Complex<_Tp>& a, _Tp b)
+{
+ _Tp t = (_Tp)1/b;
+ a.re *= t; a.im *= t; return a;
+}
+
+//////////////////////////////// 2D Point ////////////////////////////////
+
+// Point_<_Tp>: 2D point (x, y) with conversions to/from the legacy C types
+// (CvPoint, CvPoint2D32f), Size_, and Vec<_Tp,2>. Element writes in the
+// arithmetic operators go through saturate_cast.
+template<typename _Tp> inline Point_<_Tp>::Point_() : x(0), y(0) {}
+template<typename _Tp> inline Point_<_Tp>::Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {}
+template<typename _Tp> inline Point_<_Tp>::Point_(const Point_& pt) : x(pt.x), y(pt.y) {}
+template<typename _Tp> inline Point_<_Tp>::Point_(const CvPoint& pt) : x((_Tp)pt.x), y((_Tp)pt.y) {}
+template<typename _Tp> inline Point_<_Tp>::Point_(const CvPoint2D32f& pt)
+ : x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)) {}
+template<typename _Tp> inline Point_<_Tp>::Point_(const Size_<_Tp>& sz) : x(sz.width), y(sz.height) {}
+template<typename _Tp> inline Point_<_Tp>::Point_(const Vec<_Tp,2>& v) : x(v[0]), y(v[1]) {}
+template<typename _Tp> inline Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt)
+{ x = pt.x; y = pt.y; return *this; }
+
+template<typename _Tp> template<typename _Tp2> inline Point_<_Tp>::operator Point_<_Tp2>() const
+{ return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); }
+template<typename _Tp> inline Point_<_Tp>::operator CvPoint() const
+{ return cvPoint(saturate_cast<int>(x), saturate_cast<int>(y)); }
+template<typename _Tp> inline Point_<_Tp>::operator CvPoint2D32f() const
+{ return cvPoint2D32f((float)x, (float)y); }
+template<typename _Tp> inline Point_<_Tp>::operator Vec<_Tp, 2>() const
+{ return Vec<_Tp, 2>(x, y); }
+
+// dot() saturates in the element type; ddot() accumulates in double.
+template<typename _Tp> inline _Tp Point_<_Tp>::dot(const Point_& pt) const
+{ return saturate_cast<_Tp>(x*pt.x + y*pt.y); }
+template<typename _Tp> inline double Point_<_Tp>::ddot(const Point_& pt) const
+{ return (double)x*pt.x + (double)y*pt.y; }
+
+// 2D cross product (the z-component of the 3D cross), in double.
+template<typename _Tp> inline double Point_<_Tp>::cross(const Point_& pt) const
+{ return (double)x*pt.y - (double)y*pt.x; }
+
+template<typename _Tp> static inline Point_<_Tp>&
+operator += (Point_<_Tp>& a, const Point_<_Tp>& b)
+{
+ a.x = saturate_cast<_Tp>(a.x + b.x);
+ a.y = saturate_cast<_Tp>(a.y + b.y);
+ return a;
+}
+
+template<typename _Tp> static inline Point_<_Tp>&
+operator -= (Point_<_Tp>& a, const Point_<_Tp>& b)
+{
+ a.x = saturate_cast<_Tp>(a.x - b.x);
+ a.y = saturate_cast<_Tp>(a.y - b.y);
+ return a;
+}
+
+template<typename _Tp> static inline Point_<_Tp>&
+operator *= (Point_<_Tp>& a, int b)
+{
+ a.x = saturate_cast<_Tp>(a.x*b);
+ a.y = saturate_cast<_Tp>(a.y*b);
+ return a;
+}
+
+template<typename _Tp> static inline Point_<_Tp>&
+operator *= (Point_<_Tp>& a, float b)
+{
+ a.x = saturate_cast<_Tp>(a.x*b);
+ a.y = saturate_cast<_Tp>(a.y*b);
+ return a;
+}
+
+template<typename _Tp> static inline Point_<_Tp>&
+operator *= (Point_<_Tp>& a, double b)
+{
+ a.x = saturate_cast<_Tp>(a.x*b);
+ a.y = saturate_cast<_Tp>(a.y*b);
+ return a;
+}
+
+// Euclidean distance from the origin, in double.
+template<typename _Tp> static inline double norm(const Point_<_Tp>& pt)
+{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); }
+
+template<typename _Tp> static inline bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b)
+{ return a.x == b.x && a.y == b.y; }
+
+template<typename _Tp> static inline bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b)
+{ return a.x != b.x || a.y != b.y; }
+
+template<typename _Tp> static inline Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator - (const Point_<_Tp>& a)
+{ return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator * (const Point_<_Tp>& a, int b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator * (float a, const Point_<_Tp>& b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator * (const Point_<_Tp>& a, double b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); }
+
+template<typename _Tp> static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b)
+{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
+//////////////////////////////// 3D Point ////////////////////////////////
+
+template<typename _Tp> inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {}
+template<typename _Tp> inline Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) : x(_x), y(_y), z(_z) {}
+template<typename _Tp> inline Point3_<_Tp>::Point3_(const Point3_& pt) : x(pt.x), y(pt.y), z(pt.z) {}
+template<typename _Tp> inline Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) : x(pt.x), y(pt.y), z(_Tp()) {}
+template<typename _Tp> inline Point3_<_Tp>::Point3_(const CvPoint3D32f& pt) :
+ x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)), z(saturate_cast<_Tp>(pt.z)) {}
+template<typename _Tp> inline Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) : x(v[0]), y(v[1]), z(v[2]) {}
+
+template<typename _Tp> template<typename _Tp2> inline Point3_<_Tp>::operator Point3_<_Tp2>() const
+{ return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); }
+
+template<typename _Tp> inline Point3_<_Tp>::operator CvPoint3D32f() const
+{ return cvPoint3D32f((float)x, (float)y, (float)z); }
+
+template<typename _Tp> inline Point3_<_Tp>::operator Vec<_Tp, 3>() const
+{ return Vec<_Tp, 3>(x, y, z); }
+
+template<typename _Tp> inline Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt)
+{ x = pt.x; y = pt.y; z = pt.z; return *this; }
+
+template<typename _Tp> inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const
+{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); }
+template<typename _Tp> inline double Point3_<_Tp>::ddot(const Point3_& pt) const
+{ return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; }
+
+template<typename _Tp> inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const
+{
+ return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x);
+}
+
+// Free arithmetic/comparison operators for Point3_. All arithmetic results
+// pass through saturate_cast so integer element types clamp rather than wrap.
+// operator* is provided for int, float and double scalars (both orders).
+template<typename _Tp> static inline Point3_<_Tp>&
+operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b)
+{
+ a.x = saturate_cast<_Tp>(a.x + b.x);
+ a.y = saturate_cast<_Tp>(a.y + b.y);
+ a.z = saturate_cast<_Tp>(a.z + b.z);
+ return a;
+}
+
+template<typename _Tp> static inline Point3_<_Tp>&
+operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b)
+{
+ a.x = saturate_cast<_Tp>(a.x - b.x);
+ a.y = saturate_cast<_Tp>(a.y - b.y);
+ a.z = saturate_cast<_Tp>(a.z - b.z);
+ return a;
+}
+
+template<typename _Tp> static inline Point3_<_Tp>&
+operator *= (Point3_<_Tp>& a, int b)
+{
+ a.x = saturate_cast<_Tp>(a.x*b);
+ a.y = saturate_cast<_Tp>(a.y*b);
+ a.z = saturate_cast<_Tp>(a.z*b);
+ return a;
+}
+
+template<typename _Tp> static inline Point3_<_Tp>&
+operator *= (Point3_<_Tp>& a, float b)
+{
+ a.x = saturate_cast<_Tp>(a.x*b);
+ a.y = saturate_cast<_Tp>(a.y*b);
+ a.z = saturate_cast<_Tp>(a.z*b);
+ return a;
+}
+
+template<typename _Tp> static inline Point3_<_Tp>&
+operator *= (Point3_<_Tp>& a, double b)
+{
+ a.x = saturate_cast<_Tp>(a.x*b);
+ a.y = saturate_cast<_Tp>(a.y*b);
+ a.z = saturate_cast<_Tp>(a.z*b);
+ return a;
+}
+
+// Euclidean (L2) norm, computed in double to avoid integer overflow.
+template<typename _Tp> static inline double norm(const Point3_<_Tp>& pt)
+{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); }
+
+template<typename _Tp> static inline bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
+{ return a.x == b.x && a.y == b.y && a.z == b.z; }
+
+template<typename _Tp> static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
+{ return a.x != b.x || a.y != b.y || a.z != b.z; }
+
+template<typename _Tp> static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x),
+ saturate_cast<_Tp>(a.y + b.y),
+ saturate_cast<_Tp>(a.z + b.z)); }
+
+template<typename _Tp> static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x),
+ saturate_cast<_Tp>(a.y - b.y),
+ saturate_cast<_Tp>(a.z - b.z)); }
+
+// Unary negation (saturated; matters for unsigned/narrow element types).
+template<typename _Tp> static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(-a.x),
+ saturate_cast<_Tp>(-a.y),
+ saturate_cast<_Tp>(-a.z) ); }
+
+template<typename _Tp> static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b),
+ saturate_cast<_Tp>(a.y*b),
+ saturate_cast<_Tp>(a.z*b) ); }
+
+template<typename _Tp> static inline Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a),
+ saturate_cast<_Tp>(b.y*a),
+ saturate_cast<_Tp>(b.z*a) ); }
+
+template<typename _Tp> static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b),
+ saturate_cast<_Tp>(a.y*b),
+ saturate_cast<_Tp>(a.z*b) ); }
+
+template<typename _Tp> static inline Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a),
+ saturate_cast<_Tp>(b.y*a),
+ saturate_cast<_Tp>(b.z*a) ); }
+
+template<typename _Tp> static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b),
+ saturate_cast<_Tp>(a.y*b),
+ saturate_cast<_Tp>(a.z*b) ); }
+
+template<typename _Tp> static inline Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b)
+{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a),
+ saturate_cast<_Tp>(b.y*a),
+ saturate_cast<_Tp>(b.z*a) ); }
+
+//////////////////////////////// Size ////////////////////////////////
+
+// Size_ inline implementations: width/height pair with conversions to/from
+// the C API types CvSize / CvSize2D32f and basic element-wise arithmetic.
+// Note: unlike Point3_, Size_ arithmetic does NOT saturate.
+template<typename _Tp> inline Size_<_Tp>::Size_()
+ : width(0), height(0) {}
+template<typename _Tp> inline Size_<_Tp>::Size_(_Tp _width, _Tp _height)
+ : width(_width), height(_height) {}
+template<typename _Tp> inline Size_<_Tp>::Size_(const Size_& sz)
+ : width(sz.width), height(sz.height) {}
+template<typename _Tp> inline Size_<_Tp>::Size_(const CvSize& sz)
+ : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {}
+template<typename _Tp> inline Size_<_Tp>::Size_(const CvSize2D32f& sz)
+ : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {}
+// Point -> Size: x becomes width, y becomes height.
+template<typename _Tp> inline Size_<_Tp>::Size_(const Point_<_Tp>& pt) : width(pt.x), height(pt.y) {}
+
+template<typename _Tp> template<typename _Tp2> inline Size_<_Tp>::operator Size_<_Tp2>() const
+{ return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); }
+template<typename _Tp> inline Size_<_Tp>::operator CvSize() const
+{ return cvSize(saturate_cast<int>(width), saturate_cast<int>(height)); }
+template<typename _Tp> inline Size_<_Tp>::operator CvSize2D32f() const
+{ return cvSize2D32f((float)width, (float)height); }
+
+template<typename _Tp> inline Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz)
+{ width = sz.width; height = sz.height; return *this; }
+template<typename _Tp> static inline Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b)
+{ return Size_<_Tp>(a.width * b, a.height * b); }
+template<typename _Tp> static inline Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b)
+{ return Size_<_Tp>(a.width + b.width, a.height + b.height); }
+template<typename _Tp> static inline Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b)
+{ return Size_<_Tp>(a.width - b.width, a.height - b.height); }
+// area: width*height in _Tp arithmetic (may overflow for narrow int types).
+template<typename _Tp> inline _Tp Size_<_Tp>::area() const { return width*height; }
+
+template<typename _Tp> static inline Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b)
+{ a.width += b.width; a.height += b.height; return a; }
+template<typename _Tp> static inline Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b)
+{ a.width -= b.width; a.height -= b.height; return a; }
+
+template<typename _Tp> static inline bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b)
+{ return a.width == b.width && a.height == b.height; }
+template<typename _Tp> static inline bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b)
+{ return a.width != b.width || a.height != b.height; }
+
+//////////////////////////////// Rect ////////////////////////////////
+
+// Rect_ inline implementations. A rectangle is (x, y, width, height) with an
+// inclusive top-left corner and exclusive bottom-right corner (see contains()).
+// operator&/&= compute intersection, operator|/|= the bounding union.
+
+template<typename _Tp> inline Rect_<_Tp>::Rect_() : x(0), y(0), width(0), height(0) {}
+template<typename _Tp> inline Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height) : x(_x), y(_y), width(_width), height(_height) {}
+template<typename _Tp> inline Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) : x(r.x), y(r.y), width(r.width), height(r.height) {}
+template<typename _Tp> inline Rect_<_Tp>::Rect_(const CvRect& r) : x((_Tp)r.x), y((_Tp)r.y), width((_Tp)r.width), height((_Tp)r.height) {}
+template<typename _Tp> inline Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) :
+ x(org.x), y(org.y), width(sz.width), height(sz.height) {}
+// Construct from two arbitrary corner points (order-independent).
+template<typename _Tp> inline Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2)
+{
+ x = std::min(pt1.x, pt2.x); y = std::min(pt1.y, pt2.y);
+ width = std::max(pt1.x, pt2.x) - x; height = std::max(pt1.y, pt2.y) - y;
+}
+template<typename _Tp> inline Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r )
+{ x = r.x; y = r.y; width = r.width; height = r.height; return *this; }
+
+// tl/br: top-left and (exclusive) bottom-right corners.
+template<typename _Tp> inline Point_<_Tp> Rect_<_Tp>::tl() const { return Point_<_Tp>(x,y); }
+template<typename _Tp> inline Point_<_Tp> Rect_<_Tp>::br() const { return Point_<_Tp>(x+width, y+height); }
+
+// rect +/- point shifts the origin; rect +/- size grows/shrinks the extent.
+template<typename _Tp> static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b )
+{ a.x += b.x; a.y += b.y; return a; }
+template<typename _Tp> static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b )
+{ a.x -= b.x; a.y -= b.y; return a; }
+
+template<typename _Tp> static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b )
+{ a.width += b.width; a.height += b.height; return a; }
+
+template<typename _Tp> static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b )
+{ a.width -= b.width; a.height -= b.height; return a; }
+
+// Intersection; an empty intersection collapses to the default rectangle.
+// NOTE(review): `a = Rect();` assigns a Rect_<int>, so for non-int _Tp this
+// relies on the template conversion operator defined below — confirm intended.
+template<typename _Tp> static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )
+{
+ _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y);
+ a.width = std::min(a.x + a.width, b.x + b.width) - x1;
+ a.height = std::min(a.y + a.height, b.y + b.height) - y1;
+ a.x = x1; a.y = y1;
+ if( a.width <= 0 || a.height <= 0 )
+ a = Rect();
+ return a;
+}
+
+// Bounding union (smallest rectangle covering both a and b).
+template<typename _Tp> static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b )
+{
+ _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y);
+ a.width = std::max(a.x + a.width, b.x + b.width) - x1;
+ a.height = std::max(a.y + a.height, b.y + b.height) - y1;
+ a.x = x1; a.y = y1;
+ return a;
+}
+
+template<typename _Tp> inline Size_<_Tp> Rect_<_Tp>::size() const { return Size_<_Tp>(width, height); }
+template<typename _Tp> inline _Tp Rect_<_Tp>::area() const { return width*height; }
+
+template<typename _Tp> template<typename _Tp2> inline Rect_<_Tp>::operator Rect_<_Tp2>() const
+{ return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y),
+ saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); }
+template<typename _Tp> inline Rect_<_Tp>::operator CvRect() const
+{ return cvRect(saturate_cast<int>(x), saturate_cast<int>(y),
+ saturate_cast<int>(width), saturate_cast<int>(height)); }
+
+// Half-open membership test: [x, x+width) x [y, y+height).
+template<typename _Tp> inline bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const
+{ return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height; }
+
+template<typename _Tp> static inline bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
+{
+ return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height;
+}
+
+template<typename _Tp> static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
+{
+ return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height;
+}
+
+template<typename _Tp> static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b)
+{
+ return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height );
+}
+
+template<typename _Tp> static inline Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b)
+{
+ return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height );
+}
+
+template<typename _Tp> static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b)
+{
+ return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height );
+}
+
+template<typename _Tp> static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
+{
+ Rect_<_Tp> c = a;
+ return c &= b;
+}
+
+template<typename _Tp> static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
+{
+ Rect_<_Tp> c = a;
+ return c |= b;
+}
+
+// Defined here (not with Point_) because it needs the complete Rect_ type.
+template<typename _Tp> inline bool Point_<_Tp>::inside( const Rect_<_Tp>& r ) const
+{
+ return r.contains(*this);
+}
+
+// RotatedRect: center + size + rotation angle, interchangeable with CvBox2D.
+// Default ctor zeroes only angle; center/size rely on their own default ctors.
+inline RotatedRect::RotatedRect() { angle = 0; }
+inline RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle)
+ : center(_center), size(_size), angle(_angle) {}
+inline RotatedRect::RotatedRect(const CvBox2D& box)
+ : center(box.center), size(box.size), angle(box.angle) {}
+inline RotatedRect::operator CvBox2D() const
+{
+ CvBox2D box; box.center = center; box.size = size; box.angle = angle;
+ return box;
+}
+
+//////////////////////////////// Scalar_ ///////////////////////////////
+
+// Scalar_ inline implementations: a 4-element value (val[0..3]) used for
+// pixel values and quaternion-style arithmetic. All arithmetic saturates
+// into _Tp. Unused trailing channels default to 0.
+template<typename _Tp> inline Scalar_<_Tp>::Scalar_()
+{ this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; }
+
+template<typename _Tp> inline Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3)
+{ this->val[0] = v0; this->val[1] = v1; this->val[2] = v2; this->val[3] = v3; }
+
+template<typename _Tp> inline Scalar_<_Tp>::Scalar_(const CvScalar& s)
+{
+ this->val[0] = saturate_cast<_Tp>(s.val[0]);
+ this->val[1] = saturate_cast<_Tp>(s.val[1]);
+ this->val[2] = saturate_cast<_Tp>(s.val[2]);
+ this->val[3] = saturate_cast<_Tp>(s.val[3]);
+}
+
+// Single-value ctor: only channel 0 is set; the rest are zero.
+template<typename _Tp> inline Scalar_<_Tp>::Scalar_(_Tp v0)
+{ this->val[0] = v0; this->val[1] = this->val[2] = this->val[3] = 0; }
+
+// all(): broadcast v0 into every channel.
+template<typename _Tp> inline Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0)
+{ return Scalar_<_Tp>(v0, v0, v0, v0); }
+template<typename _Tp> inline Scalar_<_Tp>::operator CvScalar() const
+{ return cvScalar(this->val[0], this->val[1], this->val[2], this->val[3]); }
+
+template<typename _Tp> template<typename T2> inline Scalar_<_Tp>::operator Scalar_<T2>() const
+{
+ return Scalar_<T2>(saturate_cast<T2>(this->val[0]),
+ saturate_cast<T2>(this->val[1]),
+ saturate_cast<T2>(this->val[2]),
+ saturate_cast<T2>(this->val[3]));
+}
+
+template<typename _Tp> static inline Scalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ a.val[0] = saturate_cast<_Tp>(a.val[0] + b.val[0]);
+ a.val[1] = saturate_cast<_Tp>(a.val[1] + b.val[1]);
+ a.val[2] = saturate_cast<_Tp>(a.val[2] + b.val[2]);
+ a.val[3] = saturate_cast<_Tp>(a.val[3] + b.val[3]);
+ return a;
+}
+
+template<typename _Tp> static inline Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ a.val[0] = saturate_cast<_Tp>(a.val[0] - b.val[0]);
+ a.val[1] = saturate_cast<_Tp>(a.val[1] - b.val[1]);
+ a.val[2] = saturate_cast<_Tp>(a.val[2] - b.val[2]);
+ a.val[3] = saturate_cast<_Tp>(a.val[3] - b.val[3]);
+ return a;
+}
+
+template<typename _Tp> static inline Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v )
+{
+ a.val[0] = saturate_cast<_Tp>(a.val[0] * v);
+ a.val[1] = saturate_cast<_Tp>(a.val[1] * v);
+ a.val[2] = saturate_cast<_Tp>(a.val[2] * v);
+ a.val[3] = saturate_cast<_Tp>(a.val[3] * v);
+ return a;
+}
+
+// mul: per-channel product, optionally pre-scaled by `scale` (default set in
+// the class declaration, outside this chunk).
+template<typename _Tp> inline Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& t, double scale ) const
+{
+ return Scalar_<_Tp>( saturate_cast<_Tp>(this->val[0]*t.val[0]*scale),
+ saturate_cast<_Tp>(this->val[1]*t.val[1]*scale),
+ saturate_cast<_Tp>(this->val[2]*t.val[2]*scale),
+ saturate_cast<_Tp>(this->val[3]*t.val[3]*scale));
+}
+
+template<typename _Tp> static inline bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b )
+{
+ return a.val[0] == b.val[0] && a.val[1] == b.val[1] &&
+ a.val[2] == b.val[2] && a.val[3] == b.val[3];
+}
+
+template<typename _Tp> static inline bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b )
+{
+ return a.val[0] != b.val[0] || a.val[1] != b.val[1] ||
+ a.val[2] != b.val[2] || a.val[3] != b.val[3];
+}
+
+// Binary Scalar_ arithmetic. Note that Scalar_*Scalar_ below is NOT
+// element-wise: it is the quaternion (Hamilton) product, with conj()/isReal()
+// completing the quaternion interface.
+template<typename _Tp> static inline Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] + b.val[0]),
+ saturate_cast<_Tp>(a.val[1] + b.val[1]),
+ saturate_cast<_Tp>(a.val[2] + b.val[2]),
+ saturate_cast<_Tp>(a.val[3] + b.val[3]));
+}
+
+template<typename _Tp> static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]),
+ saturate_cast<_Tp>(a.val[1] - b.val[1]),
+ saturate_cast<_Tp>(a.val[2] - b.val[2]),
+ saturate_cast<_Tp>(a.val[3] - b.val[3]));
+}
+
+template<typename _Tp> static inline Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha)
+{
+ return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] * alpha),
+ saturate_cast<_Tp>(a.val[1] * alpha),
+ saturate_cast<_Tp>(a.val[2] * alpha),
+ saturate_cast<_Tp>(a.val[3] * alpha));
+}
+
+template<typename _Tp> static inline Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a)
+{
+ return a*alpha;
+}
+
+template<typename _Tp> static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a)
+{
+ return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), saturate_cast<_Tp>(-a.val[1]),
+ saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3]));
+}
+
+
+// Quaternion product treating (val[0], val[1], val[2], val[3]) as (w, x, y, z).
+template<typename _Tp> static inline Scalar_<_Tp>
+operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]),
+ saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2]),
+ saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + a[3]*b[1]),
+ saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0]));
+}
+
+template<typename _Tp> static inline Scalar_<_Tp>&
+operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ a = a*b;
+ return a;
+}
+
+// Quaternion conjugate: negate the three imaginary channels.
+template<typename _Tp> inline Scalar_<_Tp> Scalar_<_Tp>::conj() const
+{
+ return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]),
+ saturate_cast<_Tp>(-this->val[1]),
+ saturate_cast<_Tp>(-this->val[2]),
+ saturate_cast<_Tp>(-this->val[3]));
+}
+
+template<typename _Tp> inline bool Scalar_<_Tp>::isReal() const
+{
+ return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0;
+}
+
+// Scalar_ division. Scalar/scalar divides per channel; scalar/Scalar_ and
+// Scalar_/Scalar_ are quaternion divisions built on conj() and the squared
+// norm b[0]^2 + b[1]^2 + b[2]^2 + b[3]^2.
+template<typename _Tp> static inline
+Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha)
+{
+ return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] / alpha),
+ saturate_cast<_Tp>(a.val[1] / alpha),
+ saturate_cast<_Tp>(a.val[2] / alpha),
+ saturate_cast<_Tp>(a.val[3] / alpha));
+}
+
+// NOTE(review): the two "specializations" below declare a template parameter
+// _Tp that appears nowhere in the signature, so it is a non-deduced context
+// and overload resolution can never select them — the generic operator above
+// handles float/double instead. They are effectively dead code; fixing them
+// (plain non-template overloads) would also change results slightly, since
+// they divide via the reciprocal. Kept as-is to stay byte-compatible with
+// the upstream OpenCV header being imported here.
+template<typename _Tp> static inline
+Scalar_<float> operator / (const Scalar_<float>& a, float alpha)
+{
+ float s = 1/alpha;
+ return Scalar_<float>(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s);
+}
+
+template<typename _Tp> static inline
+Scalar_<double> operator / (const Scalar_<double>& a, double alpha)
+{
+ double s = 1/alpha;
+ return Scalar_<double>(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s);
+}
+
+template<typename _Tp> static inline
+Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha)
+{
+ a = a/alpha;
+ return a;
+}
+
+// a / q = a * conj(q) / |q|^2  (quaternion inverse scaled by a).
+template<typename _Tp> static inline
+Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b)
+{
+ _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]);
+ return b.conj()*s;
+}
+
+// Quaternion division: a * b^-1 via the scalar/Scalar_ overload above.
+template<typename _Tp> static inline
+Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ return a*((_Tp)1/b);
+}
+
+template<typename _Tp> static inline
+Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
+{
+ a = a/b;
+ return a;
+}
+
+//////////////////////////////// Range /////////////////////////////////
+
+// Range: half-open integer interval [start, end). Range::all() is the
+// sentinel (INT_MIN, INT_MAX) meaning "the whole sequence", and converts
+// to/from the C API's CvSlice / CV_WHOLE_SEQ.
+inline Range::Range() : start(0), end(0) {}
+inline Range::Range(int _start, int _end) : start(_start), end(_end) {}
+inline Range::Range(const CvSlice& slice) : start(slice.start_index), end(slice.end_index)
+{
+ if( start == 0 && end == CV_WHOLE_SEQ_END_INDEX )
+ *this = Range::all();
+}
+
+inline int Range::size() const { return end - start; }
+inline bool Range::empty() const { return start == end; }
+inline Range Range::all() { return Range(INT_MIN, INT_MAX); }
+
+static inline bool operator == (const Range& r1, const Range& r2)
+{ return r1.start == r2.start && r1.end == r2.end; }
+
+static inline bool operator != (const Range& r1, const Range& r2)
+{ return !(r1 == r2); }
+
+// !r is true iff the range is empty.
+static inline bool operator !(const Range& r)
+{ return r.start == r.end; }
+
+// Intersection; clamped so the result is never "negative" (end >= start).
+static inline Range operator & (const Range& r1, const Range& r2)
+{
+ Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end));
+ r.end = std::max(r.end, r.start);
+ return r;
+}
+
+static inline Range& operator &= (Range& r1, const Range& r2)
+{
+ r1 = r1 & r2;
+ return r1;
+}
+
+// Shift the whole interval by delta (both orders provided).
+static inline Range operator + (const Range& r1, int delta)
+{
+ return Range(r1.start + delta, r1.end + delta);
+}
+
+static inline Range operator + (int delta, const Range& r1)
+{
+ return Range(r1.start + delta, r1.end + delta);
+}
+
+static inline Range operator - (const Range& r1, int delta)
+{
+ return r1 + (-delta);
+}
+
+// Range::all() maps back to CV_WHOLE_SEQ; any other range to cvSlice.
+inline Range::operator CvSlice() const
+{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; }
+
+
+
+//////////////////////////////// Vector ////////////////////////////////
+
+// template vector class. It is similar to STL's vector,
+// with a few important differences:
+// 1) it can be created on top of user-allocated data w/o copying it
+// 2) vector b = a means copying the header,
+// not the underlying data (use clone() to make a deep copy)
+//
+// Sharing is reference-counted: Hdr::refcount points to a heap int updated
+// atomically with CV_XADD, or is null when the Vector wraps external memory
+// it does not own (set(..., _copyData=false)).
+template <typename _Tp> class Vector
+{
+public:
+ typedef _Tp value_type;
+ typedef _Tp* iterator;
+ typedef const _Tp* const_iterator;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+
+ // Shared header: data may point into the middle of datastart when the
+ // Vector is a sub-range view (see the (Vector, Range) constructor).
+ struct Hdr
+ {
+ Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {};
+ _Tp* data;
+ _Tp* datastart;
+ int* refcount;
+ size_t size;
+ size_t capacity;
+ };
+
+ Vector() {}
+ Vector(size_t _size) { resize(_size); }
+ Vector(size_t _size, const _Tp& val)
+ {
+ resize(_size);
+ for(size_t i = 0; i < _size; i++)
+ hdr.data[i] = val;
+ }
+ // Wrap (or copy, if _copyData) an external buffer.
+ Vector(_Tp* _data, size_t _size, bool _copyData=false)
+ { set(_data, _size, _copyData); }
+
+ template<int n> Vector(const Vec<_Tp, n>& vec)
+ { set((_Tp*)&vec.val[0], n, true); }
+
+ Vector(const std::vector<_Tp>& vec, bool _copyData=false)
+ { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); }
+
+ // Copy ctor shares the header (shallow copy), per the class comment.
+ Vector(const Vector& d) { *this = d; }
+
+ // Sub-range view: shares d's buffer, offset by r.start. An out-of-bounds
+ // range silently yields an empty Vector.
+ Vector(const Vector& d, const Range& r_)
+ {
+ Range r = r_ == Range::all() ? Range(0, d.size()) : r_;
+ /*if( r == Range::all() )
+ r = Range(0, d.size());*/
+ if( r.size() > 0 && r.start >= 0 && r.end <= d.size() )
+ {
+ if( d.hdr.refcount )
+ CV_XADD(d.hdr.refcount, 1);
+ hdr.refcount = d.hdr.refcount;
+ hdr.datastart = d.hdr.datastart;
+ hdr.data = d.hdr.data + r.start;
+ hdr.capacity = hdr.size = r.size();
+ }
+ }
+
+ // addref the source before release() so self-related headers stay alive.
+ Vector<_Tp>& operator = (const Vector& d)
+ {
+ if( this != &d )
+ {
+ if( d.hdr.refcount )
+ CV_XADD(d.hdr.refcount, 1);
+ release();
+ hdr = d.hdr;
+ }
+ return *this;
+ }
+
+ ~Vector() { release(); }
+
+ // Deep copy (allocates and copies the elements).
+ Vector<_Tp> clone() const
+ { return hdr.data ? Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); }
+
+ void copyTo(Vector<_Tp>& vec) const
+ {
+ size_t i, sz = size();
+ vec.resize(sz);
+ const _Tp* src = hdr.data;
+ _Tp* dst = vec.hdr.data;
+ for( i = 0; i < sz; i++ )
+ dst[i] = src[i];
+ }
+
+ void copyTo(std::vector<_Tp>& vec) const
+ {
+ size_t i, sz = size();
+ vec.resize(sz);
+ const _Tp* src = hdr.data;
+ _Tp* dst = sz ? &vec[0] : 0;
+ for( i = 0; i < sz; i++ )
+ dst[i] = src[i];
+ }
+
+ // View as a single-column CvMat header over the same data (no copy).
+ operator CvMat() const
+ { return cvMat((int)size(), 1, type(), (void*)hdr.data); }
+
+ // Element access is bounds-checked only in debug builds (CV_DbgAssert).
+ _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; }
+ const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; }
+ Vector operator() (const Range& r) const { return Vector(*this, r); }
+ _Tp& back() { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; }
+ const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; }
+ _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; }
+ const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; }
+
+ _Tp* begin() { return hdr.data; }
+ _Tp* end() { return hdr.data + hdr.size; }
+ const _Tp* begin() const { return hdr.data; }
+ const _Tp* end() const { return hdr.data + hdr.size; }
+
+ void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); }
+ // Drop one reference; the last owner frees the buffer. CV_XADD returns
+ // the PRE-decrement value, hence the == 1 test.
+ void release()
+ {
+ if( hdr.refcount && CV_XADD(hdr.refcount, -1) == 1 )
+ {
+ delete[] hdr.datastart;
+ delete hdr.refcount;
+ }
+ hdr = Hdr();
+ }
+
+ // _copyData=false: adopt the pointer without owning it (refcount stays 0,
+ // so release() will never delete it). _copyData=true: allocate and copy.
+ void set(_Tp* _data, size_t _size, bool _copyData=false)
+ {
+ if( !_copyData )
+ {
+ release();
+ hdr.data = hdr.datastart = _data;
+ hdr.size = hdr.capacity = _size;
+ hdr.refcount = 0;
+ }
+ else
+ {
+ reserve(_size);
+ for( size_t i = 0; i < _size; i++ )
+ hdr.data[i] = _data[i];
+ hdr.size = _size;
+ }
+ }
+
+ // Grow capacity; also reallocates (copy-on-write) when the buffer is
+ // shared (*refcount > 1), even if the capacity would suffice.
+ void reserve(size_t newCapacity)
+ {
+ _Tp* newData;
+ int* newRefcount;
+ size_t i, oldSize = hdr.size;
+ if( (!hdr.refcount || *hdr.refcount == 1) && hdr.capacity >= newCapacity )
+ return;
+ newCapacity = std::max(newCapacity, oldSize);
+ newData = new _Tp[newCapacity];
+ newRefcount = new int(1);
+ for( i = 0; i < oldSize; i++ )
+ newData[i] = hdr.data[i];
+ release();
+ hdr.data = hdr.datastart = newData;
+ hdr.capacity = newCapacity;
+ hdr.size = oldSize;
+ hdr.refcount = newRefcount;
+ }
+
+ // Resize; new elements are value-initialized. Growth doubles capacity
+ // (minimum 4). NOTE(review): std::max(newSize, (size_t)0) is a no-op
+ // since size_t is unsigned — kept for upstream byte-compatibility.
+ void resize(size_t newSize)
+ {
+ size_t i;
+ newSize = std::max(newSize, (size_t)0);
+ if( (!hdr.refcount || *hdr.refcount == 1) && hdr.size == newSize )
+ return;
+ if( newSize > hdr.capacity )
+ reserve(std::max(newSize, std::max((size_t)4, hdr.capacity*2)));
+ for( i = hdr.size; i < newSize; i++ )
+ hdr.data[i] = _Tp();
+ hdr.size = newSize;
+ }
+
+ Vector<_Tp>& push_back(const _Tp& elem)
+ {
+ if( hdr.size == hdr.capacity )
+ reserve( std::max((size_t)4, hdr.capacity*2) );
+ hdr.data[hdr.size++] = elem;
+ return *this;
+ }
+
+ // pop_back on an empty Vector is a silent no-op.
+ Vector<_Tp>& pop_back()
+ {
+ if( hdr.size > 0 )
+ --hdr.size;
+ return *this;
+ }
+
+ size_t size() const { return hdr.size; }
+ size_t capacity() const { return hdr.capacity; }
+ bool empty() const { return hdr.size == 0; }
+ void clear() { resize(0); }
+ int type() const { return DataType<_Tp>::type; }
+
+protected:
+ Hdr hdr;
+};
+
+
+// Dot product of two equal-length Vectors, accumulated in the element type's
+// work_type (e.g. wider/floating). Sizes are checked with assert only.
+template<typename _Tp> inline typename DataType<_Tp>::work_type
+dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2)
+{
+ typedef typename DataType<_Tp>::work_type _Tw;
+ size_t i = 0, n = v1.size();
+ assert(v1.size() == v2.size());
+
+ _Tw s = 0;
+ const _Tp *ptr1 = &v1[0], *ptr2 = &v2[0];
+ for( ; i < n; i++ )
+ s += (_Tw)ptr1[i]*ptr2[i];
+
+ return s;
+}
+
+// Multiply-with-Carry RNG
+// next() keeps the product in 64-bit state: low 32 bits are the new value,
+// high 32 bits the carry. state must never be 0, hence the ctor guards.
+inline RNG::RNG() { state = 0xffffffff; }
+inline RNG::RNG(uint64 _state) { state = _state ? _state : 0xffffffff; }
+inline unsigned RNG::next()
+{
+ state = (uint64)(unsigned)state*CV_RNG_COEFF + (unsigned)(state >> 32);
+ return (unsigned)state;
+}
+
+// Conversion operators each advance the generator by one step.
+inline RNG::operator uchar() { return (uchar)next(); }
+inline RNG::operator schar() { return (schar)next(); }
+inline RNG::operator ushort() { return (ushort)next(); }
+inline RNG::operator short() { return (short)next(); }
+inline RNG::operator unsigned() { return next(); }
+inline unsigned RNG::operator ()(unsigned N) {return (unsigned)uniform(0,N);}
+inline unsigned RNG::operator ()() {return next();}
+inline RNG::operator int() { return (int)next(); }
+// * (2^32-1)^-1
+inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; }
+inline RNG::operator double()
+{
+ unsigned t = next();
+ return (((uint64)t << 32) | next())*5.4210108624275221700372640043497e-20;
+}
+// uniform(a, b): samples from [a, b) via modulo; a == b returns a (avoids %0).
+inline int RNG::uniform(int a, int b) { return a == b ? a : (int)(next()%(b - a) + a); }
+inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; }
+inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; }
+
+// TermCriteria: iteration-count / epsilon stopping criteria, interchangeable
+// with the C API's CvTermCriteria (max_iter <-> maxCount).
+inline TermCriteria::TermCriteria() : type(0), maxCount(0), epsilon(0) {}
+inline TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon)
+ : type(_type), maxCount(_maxCount), epsilon(_epsilon) {}
+inline TermCriteria::TermCriteria(const CvTermCriteria& criteria)
+ : type(criteria.type), maxCount(criteria.max_iter), epsilon(criteria.epsilon) {}
+inline TermCriteria::operator CvTermCriteria() const
+{ return cvTermCriteria(type, maxCount, epsilon); }
+
+// LineIterator: branch-free Bresenham stepping. `mask` is all-ones when the
+// error term is negative, selecting the plusDelta/plusStep increments.
+inline uchar* LineIterator::operator *() { return ptr; }
+inline LineIterator& LineIterator::operator ++()
+{
+ int mask = err < 0 ? -1 : 0;
+ err += minusDelta + (plusDelta & mask);
+ ptr += minusStep + (plusStep & mask);
+ return *this;
+}
+inline LineIterator LineIterator::operator ++(int)
+{
+ LineIterator it = *this;
+ ++(*this);
+ return it;
+}
+// Recover the current (x, y) from the byte offset into the image buffer.
+inline Point LineIterator::pos() const
+{
+ Point p;
+ p.y = (int)((ptr - ptr0)/step);
+ p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize);
+ return p;
+}
+
+/////////////////////////////// AutoBuffer ////////////////////////////////////////
+
+// AutoBuffer: small-buffer optimization. Uses the inline fixed_size array
+// `buf` until a larger allocation is requested, then switches to heap memory
+// via cv::allocate/cv::deallocate. Not copyable growth: allocate() discards
+// existing contents when it reallocates.
+template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::AutoBuffer()
+{
+ ptr = buf;
+ size = fixed_size;
+}
+
+template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size)
+{
+ ptr = buf;
+ size = fixed_size;
+ allocate(_size);
+}
+
+template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::~AutoBuffer()
+{ deallocate(); }
+
+// Ensure capacity for _size elements; no-op when the current buffer suffices.
+template<typename _Tp, size_t fixed_size> inline void AutoBuffer<_Tp, fixed_size>::allocate(size_t _size)
+{
+ if(_size <= size)
+ return;
+ deallocate();
+ if(_size > fixed_size)
+ {
+ ptr = cv::allocate<_Tp>(_size);
+ size = _size;
+ }
+}
+
+// Free heap memory (if any) and fall back to the inline buffer.
+template<typename _Tp, size_t fixed_size> inline void AutoBuffer<_Tp, fixed_size>::deallocate()
+{
+ if( ptr != buf )
+ {
+ cv::deallocate<_Tp>(ptr, size);
+ ptr = buf;
+ size = fixed_size;
+ }
+}
+
+template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::operator _Tp* ()
+{ return ptr; }
+
+template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const
+{ return ptr; }
+
+
+/////////////////////////////////// Ptr ////////////////////////////////////////
+
+// Ptr: reference-counted smart pointer. The counter lives in a separate
+// fastMalloc'd int, updated atomically with CV_XADD; the owned object is
+// destroyed through delete_obj(), which has per-type specializations for the
+// classic C structs (declared below).
+template<typename _Tp> inline Ptr<_Tp>::Ptr() : obj(0), refcount(0) {}
+template<typename _Tp> inline Ptr<_Tp>::Ptr(_Tp* _obj) : obj(_obj)
+{
+ if(obj)
+ {
+ refcount = (int*)fastMalloc(sizeof(*refcount));
+ *refcount = 1;
+ }
+ else
+ refcount = 0;
+}
+
+template<typename _Tp> inline void Ptr<_Tp>::addref()
+{ if( refcount ) CV_XADD(refcount, 1); }
+
+// CV_XADD returns the pre-decrement value, so == 1 means "last reference".
+template<typename _Tp> inline void Ptr<_Tp>::release()
+{
+ if( refcount && CV_XADD(refcount, -1) == 1 )
+ {
+ delete_obj();
+ fastFree(refcount);
+ }
+ refcount = 0;
+ obj = 0;
+}
+
+template<typename _Tp> inline void Ptr<_Tp>::delete_obj()
+{
+ if( obj ) delete obj;
+}
+
+template<typename _Tp> inline Ptr<_Tp>::~Ptr() { release(); }
+
+template<typename _Tp> inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& _ptr)
+{
+ obj = _ptr.obj;
+ refcount = _ptr.refcount;
+ addref();
+}
+
+// Self-assignment safe: the source is addref'd before release().
+template<typename _Tp> inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& _ptr)
+{
+ if (this != &_ptr)
+ {
+ int* _refcount = _ptr.refcount;
+ if( _refcount )
+ CV_XADD(_refcount, 1);
+ release();
+ obj = _ptr.obj;
+ refcount = _refcount;
+ }
+ return *this;
+}
+
+template<typename _Tp> inline _Tp* Ptr<_Tp>::operator -> () { return obj; }
+template<typename _Tp> inline const _Tp* Ptr<_Tp>::operator -> () const { return obj; }
+
+template<typename _Tp> inline Ptr<_Tp>::operator _Tp* () { return obj; }
+template<typename _Tp> inline Ptr<_Tp>::operator const _Tp*() const { return obj; }
+
+template<typename _Tp> inline bool Ptr<_Tp>::empty() const { return obj == 0; }
+
+// Converting ctor: dynamic_cast across the hierarchy; yields an empty Ptr
+// (no shared refcount) when the cast fails.
+template<typename _Tp> template<typename _Tp2> Ptr<_Tp>::Ptr(const Ptr<_Tp2>& p)
+ : obj(0), refcount(0)
+{
+ if (p.empty())
+ return;
+
+ _Tp* p_casted = dynamic_cast<_Tp*>(p.obj);
+ if (!p_casted)
+ return;
+
+ obj = p_casted;
+ refcount = p.refcount;
+ addref();
+}
+
+// ptr<_Tp2>(): checked downcast sharing the same refcount; empty on failure.
+template<typename _Tp> template<typename _Tp2> inline Ptr<_Tp2> Ptr<_Tp>::ptr()
+{
+ Ptr<_Tp2> p;
+ if( !obj )
+ return p;
+
+ _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
+ if (!obj_casted)
+ return p;
+
+ if( refcount )
+ CV_XADD(refcount, 1);
+
+ p.obj = obj_casted;
+ p.refcount = refcount;
+ return p;
+}
+
+template<typename _Tp> template<typename _Tp2> inline const Ptr<_Tp2> Ptr<_Tp>::ptr() const
+{
+ Ptr<_Tp2> p;
+ if( !obj )
+ return p;
+
+ _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
+ if (!obj_casted)
+ return p;
+
+ if( refcount )
+ CV_XADD(refcount, 1);
+
+ p.obj = obj_casted;
+ p.refcount = refcount;
+ return p;
+}
+
+// makePtr: pre-C++11 variadic emulation — one overload per arity (0..10
+// arguments), each forwarding const references to T's constructor and
+// wrapping the new object in a Ptr<T>.
+template<typename T>
+Ptr<T> makePtr()
+{
+ return Ptr<T>(new T());
+}
+
+template<typename T, typename A1>
+Ptr<T> makePtr(const A1& a1)
+{
+ return Ptr<T>(new T(a1));
+}
+
+template<typename T, typename A1, typename A2>
+Ptr<T> makePtr(const A1& a1, const A2& a2)
+{
+ return Ptr<T>(new T(a1, a2));
+}
+
+template<typename T, typename A1, typename A2, typename A3>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3)
+{
+ return Ptr<T>(new T(a1, a2, a3));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9, typename A10>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10));
+}
+
+//// specializied implementations of Ptr::delete_obj() for classic OpenCV types
+
+template<> CV_EXPORTS void Ptr<CvMat>::delete_obj();
+template<> CV_EXPORTS void Ptr<IplImage>::delete_obj();
+template<> CV_EXPORTS void Ptr<CvMatND>::delete_obj();
+template<> CV_EXPORTS void Ptr<CvSparseMat>::delete_obj();
+template<> CV_EXPORTS void Ptr<CvMemStorage>::delete_obj();
+template<> CV_EXPORTS void Ptr<CvFileStorage>::delete_obj();
+
+//////////////////////////////////////// XML & YAML I/O ////////////////////////////////////
+
+CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value );
+CV_EXPORTS_W void write( FileStorage& fs, const string& name, float value );
+CV_EXPORTS_W void write( FileStorage& fs, const string& name, double value );
+CV_EXPORTS_W void write( FileStorage& fs, const string& name, const string& value );
+
+template<typename _Tp> inline void write(FileStorage& fs, const _Tp& value)
+{ write(fs, string(), value); }
+
+CV_EXPORTS void writeScalar( FileStorage& fs, int value );
+CV_EXPORTS void writeScalar( FileStorage& fs, float value );
+CV_EXPORTS void writeScalar( FileStorage& fs, double value );
+CV_EXPORTS void writeScalar( FileStorage& fs, const string& value );
+
+template<> inline void write( FileStorage& fs, const int& value )
+{
+ writeScalar(fs, value);
+}
+
+template<> inline void write( FileStorage& fs, const float& value )
+{
+ writeScalar(fs, value);
+}
+
+template<> inline void write( FileStorage& fs, const double& value )
+{
+ writeScalar(fs, value);
+}
+
+template<> inline void write( FileStorage& fs, const string& value )
+{
+ writeScalar(fs, value);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const Point_<_Tp>& pt )
+{
+ write(fs, pt.x);
+ write(fs, pt.y);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const Point3_<_Tp>& pt )
+{
+ write(fs, pt.x);
+ write(fs, pt.y);
+ write(fs, pt.z);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const Size_<_Tp>& sz )
+{
+ write(fs, sz.width);
+ write(fs, sz.height);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const Complex<_Tp>& c )
+{
+ write(fs, c.re);
+ write(fs, c.im);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const Rect_<_Tp>& r )
+{
+ write(fs, r.x);
+ write(fs, r.y);
+ write(fs, r.width);
+ write(fs, r.height);
+}
+
+template<typename _Tp, int cn> inline void write(FileStorage& fs, const Vec<_Tp, cn>& v )
+{
+ for(int i = 0; i < cn; i++)
+ write(fs, v.val[i]);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const Scalar_<_Tp>& s )
+{
+ write(fs, s.val[0]);
+ write(fs, s.val[1]);
+ write(fs, s.val[2]);
+ write(fs, s.val[3]);
+}
+
+inline void write(FileStorage& fs, const Range& r )
+{
+ write(fs, r.start);
+ write(fs, r.end);
+}
+
+class CV_EXPORTS WriteStructContext
+{
+public:
+ WriteStructContext(FileStorage& _fs, const string& name,
+ int flags, const string& typeName=string());
+ ~WriteStructContext();
+ FileStorage* fs;
+};
+
+template<typename _Tp> inline void write(FileStorage& fs, const string& name, const Point_<_Tp>& pt )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ write(fs, pt.x);
+ write(fs, pt.y);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const string& name, const Point3_<_Tp>& pt )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ write(fs, pt.x);
+ write(fs, pt.y);
+ write(fs, pt.z);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const string& name, const Size_<_Tp>& sz )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ write(fs, sz.width);
+ write(fs, sz.height);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const string& name, const Complex<_Tp>& c )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ write(fs, c.re);
+ write(fs, c.im);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const string& name, const Rect_<_Tp>& r )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ write(fs, r.x);
+ write(fs, r.y);
+ write(fs, r.width);
+ write(fs, r.height);
+}
+
+template<typename _Tp, int cn> inline void write(FileStorage& fs, const string& name, const Vec<_Tp, cn>& v )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ for(int i = 0; i < cn; i++)
+ write(fs, v.val[i]);
+}
+
+template<typename _Tp> inline void write(FileStorage& fs, const string& name, const Scalar_<_Tp>& s )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ write(fs, s.val[0]);
+ write(fs, s.val[1]);
+ write(fs, s.val[2]);
+ write(fs, s.val[3]);
+}
+
+inline void write(FileStorage& fs, const string& name, const Range& r )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW);
+ write(fs, r.start);
+ write(fs, r.end);
+}
+
+template<typename _Tp, int numflag> class VecWriterProxy
+{
+public:
+ VecWriterProxy( FileStorage* _fs ) : fs(_fs) {}
+ void operator()(const vector<_Tp>& vec) const
+ {
+ size_t i, count = vec.size();
+ for( i = 0; i < count; i++ )
+ write( *fs, vec[i] );
+ }
+ FileStorage* fs;
+};
+
+template<typename _Tp> class VecWriterProxy<_Tp,1>
+{
+public:
+ VecWriterProxy( FileStorage* _fs ) : fs(_fs) {}
+ void operator()(const vector<_Tp>& vec) const
+ {
+ int _fmt = DataType<_Tp>::fmt;
+ char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' };
+ fs->writeRaw( string(fmt), !vec.empty() ? (uchar*)&vec[0] : 0, vec.size()*sizeof(_Tp) );
+ }
+ FileStorage* fs;
+};
+
+template<typename _Tp> static inline void write( FileStorage& fs, const vector<_Tp>& vec )
+{
+ VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs);
+ w(vec);
+}
+
+template<typename _Tp> static inline void write( FileStorage& fs, const string& name,
+ const vector<_Tp>& vec )
+{
+ WriteStructContext ws(fs, name, CV_NODE_SEQ+(DataType<_Tp>::fmt != 0 ? CV_NODE_FLOW : 0));
+ write(fs, vec);
+}
+
+CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value );
+CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value );
+
+template<typename _Tp> static inline FileStorage& operator << (FileStorage& fs, const _Tp& value)
+{
+ if( !fs.isOpened() )
+ return fs;
+ if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP )
+ CV_Error( CV_StsError, "No element name has been given" );
+ write( fs, fs.elname, value );
+ if( fs.state & FileStorage::INSIDE_MAP )
+ fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP;
+ return fs;
+}
+
+CV_EXPORTS FileStorage& operator << (FileStorage& fs, const string& str);
+
+static inline FileStorage& operator << (FileStorage& fs, const char* str)
+{ return (fs << string(str)); }
+
+static inline FileStorage& operator << (FileStorage& fs, char* value)
+{ return (fs << string(value)); }
+
+inline FileNode::FileNode() : fs(0), node(0) {}
+inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node)
+ : fs(_fs), node(_node) {}
+
+inline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {}
+
+inline int FileNode::type() const { return !node ? NONE : (node->tag & TYPE_MASK); }
+inline bool FileNode::empty() const { return node == 0; }
+inline bool FileNode::isNone() const { return type() == NONE; }
+inline bool FileNode::isSeq() const { return type() == SEQ; }
+inline bool FileNode::isMap() const { return type() == MAP; }
+inline bool FileNode::isInt() const { return type() == INT; }
+inline bool FileNode::isReal() const { return type() == REAL; }
+inline bool FileNode::isString() const { return type() == STR; }
+inline bool FileNode::isNamed() const { return !node ? false : (node->tag & NAMED) != 0; }
+inline size_t FileNode::size() const
+{
+ int t = type();
+ return t == MAP ? (size_t)((CvSet*)node->data.map)->active_count :
+ t == SEQ ? (size_t)node->data.seq->total : (size_t)!isNone();
+}
+
+inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; }
+inline const CvFileNode* FileNode::operator* () const { return node; }
+
+static inline void read(const FileNode& node, int& value, int default_value)
+{
+ value = !node.node ? default_value :
+ CV_NODE_IS_INT(node.node->tag) ? node.node->data.i :
+ CV_NODE_IS_REAL(node.node->tag) ? cvRound(node.node->data.f) : 0x7fffffff;
+}
+
+static inline void read(const FileNode& node, bool& value, bool default_value)
+{
+ int temp; read(node, temp, (int)default_value);
+ value = temp != 0;
+}
+
+static inline void read(const FileNode& node, uchar& value, uchar default_value)
+{
+ int temp; read(node, temp, (int)default_value);
+ value = saturate_cast<uchar>(temp);
+}
+
+static inline void read(const FileNode& node, schar& value, schar default_value)
+{
+ int temp; read(node, temp, (int)default_value);
+ value = saturate_cast<schar>(temp);
+}
+
+static inline void read(const FileNode& node, ushort& value, ushort default_value)
+{
+ int temp; read(node, temp, (int)default_value);
+ value = saturate_cast<ushort>(temp);
+}
+
+static inline void read(const FileNode& node, short& value, short default_value)
+{
+ int temp; read(node, temp, (int)default_value);
+ value = saturate_cast<short>(temp);
+}
+
+static inline void read(const FileNode& node, float& value, float default_value)
+{
+ value = !node.node ? default_value :
+ CV_NODE_IS_INT(node.node->tag) ? (float)node.node->data.i :
+ CV_NODE_IS_REAL(node.node->tag) ? (float)node.node->data.f : 1e30f;
+}
+
+static inline void read(const FileNode& node, double& value, double default_value)
+{
+ value = !node.node ? default_value :
+ CV_NODE_IS_INT(node.node->tag) ? (double)node.node->data.i :
+ CV_NODE_IS_REAL(node.node->tag) ? node.node->data.f : 1e300;
+}
+
+static inline void read(const FileNode& node, string& value, const string& default_value)
+{
+ value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? string(node.node->data.str.ptr) : string("");
+}
+
+template<typename _Tp> static inline void read(const FileNode& node, Point_<_Tp>& value, const Point_<_Tp>& default_value)
+{
+ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;
+ value = temp.size() != 2 ? default_value : Point_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]));
+}
+
+template<typename _Tp> static inline void read(const FileNode& node, Point3_<_Tp>& value, const Point3_<_Tp>& default_value)
+{
+ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;
+ value = temp.size() != 3 ? default_value : Point3_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]),
+ saturate_cast<_Tp>(temp[2]));
+}
+
+template<typename _Tp> static inline void read(const FileNode& node, Size_<_Tp>& value, const Size_<_Tp>& default_value)
+{
+ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;
+ value = temp.size() != 2 ? default_value : Size_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]));
+}
+
+template<typename _Tp> static inline void read(const FileNode& node, Complex<_Tp>& value, const Complex<_Tp>& default_value)
+{
+ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;
+ value = temp.size() != 2 ? default_value : Complex<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]));
+}
+
+template<typename _Tp> static inline void read(const FileNode& node, Rect_<_Tp>& value, const Rect_<_Tp>& default_value)
+{
+ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;
+ value = temp.size() != 4 ? default_value : Rect_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]),
+ saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3]));
+}
+
+template<typename _Tp, int cn> static inline void read(const FileNode& node, Vec<_Tp, cn>& value, const Vec<_Tp, cn>& default_value)
+{
+ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;
+ value = temp.size() != cn ? default_value : Vec<_Tp, cn>(&temp[0]);
+}
+
+template<typename _Tp> static inline void read(const FileNode& node, Scalar_<_Tp>& value, const Scalar_<_Tp>& default_value)
+{
+ vector<_Tp> temp; FileNodeIterator it = node.begin(); it >> temp;
+ value = temp.size() != 4 ? default_value : Scalar_<_Tp>(saturate_cast<_Tp>(temp[0]), saturate_cast<_Tp>(temp[1]),
+ saturate_cast<_Tp>(temp[2]), saturate_cast<_Tp>(temp[3]));
+}
+
+static inline void read(const FileNode& node, Range& value, const Range& default_value)
+{
+ Point2i temp(value.start, value.end); const Point2i default_temp = Point2i(default_value.start, default_value.end);
+ read(node, temp, default_temp);
+ value.start = temp.x; value.end = temp.y;
+}
+
+CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() );
+CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() );
+
+inline FileNode::operator int() const
+{
+ int value;
+ read(*this, value, 0);
+ return value;
+}
+inline FileNode::operator float() const
+{
+ float value;
+ read(*this, value, 0.f);
+ return value;
+}
+inline FileNode::operator double() const
+{
+ double value;
+ read(*this, value, 0.);
+ return value;
+}
+inline FileNode::operator string() const
+{
+ string value;
+ read(*this, value, value);
+ return value;
+}
+
+inline void FileNode::readRaw( const string& fmt, uchar* vec, size_t len ) const
+{
+ begin().readRaw( fmt, vec, len );
+}
+
+template<typename _Tp, int numflag> class VecReaderProxy
+{
+public:
+ VecReaderProxy( FileNodeIterator* _it ) : it(_it) {}
+ void operator()(vector<_Tp>& vec, size_t count) const
+ {
+ count = std::min(count, it->remaining);
+ vec.resize(count);
+ for( size_t i = 0; i < count; i++, ++(*it) )
+ read(**it, vec[i], _Tp());
+ }
+ FileNodeIterator* it;
+};
+
+template<typename _Tp> class VecReaderProxy<_Tp,1>
+{
+public:
+ VecReaderProxy( FileNodeIterator* _it ) : it(_it) {}
+ void operator()(vector<_Tp>& vec, size_t count) const
+ {
+ size_t remaining = it->remaining, cn = DataType<_Tp>::channels;
+ int _fmt = DataType<_Tp>::fmt;
+ char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' };
+ size_t remaining1 = remaining/cn;
+ count = count < remaining1 ? count : remaining1;
+ vec.resize(count);
+ it->readRaw( string(fmt), !vec.empty() ? (uchar*)&vec[0] : 0, count*sizeof(_Tp) );
+ }
+ FileNodeIterator* it;
+};
+
+template<typename _Tp> static inline void
+read( FileNodeIterator& it, vector<_Tp>& vec, size_t maxCount=(size_t)INT_MAX )
+{
+ VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);
+ r(vec, maxCount);
+}
+
+template<typename _Tp> static inline void
+read( const FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=vector<_Tp>() )
+{
+ if(!node.node)
+ vec = default_value;
+ else
+ {
+ FileNodeIterator it = node.begin();
+ read( it, vec );
+ }
+}
+
+inline FileNodeIterator FileNode::begin() const
+{
+ return FileNodeIterator(fs, node);
+}
+
+inline FileNodeIterator FileNode::end() const
+{
+ return FileNodeIterator(fs, node, size());
+}
+
+inline FileNode FileNodeIterator::operator *() const
+{ return FileNode(fs, (const CvFileNode*)(void*)reader.ptr); }
+
+inline FileNode FileNodeIterator::operator ->() const
+{ return FileNode(fs, (const CvFileNode*)(void*)reader.ptr); }
+
+template<typename _Tp> static inline FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value)
+{ read( *it, value, _Tp()); return ++it; }
+
+template<typename _Tp> static inline
+FileNodeIterator& operator >> (FileNodeIterator& it, vector<_Tp>& vec)
+{
+ VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it);
+ r(vec, (size_t)INT_MAX);
+ return it;
+}
+
+template<typename _Tp> static inline void operator >> (const FileNode& n, _Tp& value)
+{ read( n, value, _Tp()); }
+
+template<typename _Tp> static inline void operator >> (const FileNode& n, vector<_Tp>& vec)
+{ FileNodeIterator it = n.begin(); it >> vec; }
+
+static inline bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2)
+{
+ return it1.fs == it2.fs && it1.container == it2.container &&
+ it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining;
+}
+
+static inline bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2)
+{
+ return !(it1 == it2);
+}
+
+static inline ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2)
+{
+ return it2.remaining - it1.remaining;
+}
+
+static inline bool operator < (const FileNodeIterator& it1, const FileNodeIterator& it2)
+{
+ return it1.remaining > it2.remaining;
+}
+
+inline FileNode FileStorage::getFirstTopLevelNode() const
+{
+ FileNode r = root();
+ FileNodeIterator it = r.begin();
+ return it != r.end() ? *it : FileNode();
+}
+
+//////////////////////////////////////// Various algorithms ////////////////////////////////////
+
+template<typename _Tp> static inline _Tp gcd(_Tp a, _Tp b)
+{
+ if( a < b )
+ std::swap(a, b);
+ while( b > 0 )
+ {
+ _Tp r = a % b;
+ a = b;
+ b = r;
+ }
+ return a;
+}
+
+/****************************************************************************************\
+
+ Generic implementation of QuickSort algorithm
+ Use it as: vector<_Tp> a; ... sort(a,<less_than_predictor>);
+
+ The current implementation was derived from *BSD system qsort():
+
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+
+\****************************************************************************************/
+
+template<typename _Tp, class _LT> void sort( vector<_Tp>& vec, _LT LT=_LT() )
+{
+ int isort_thresh = 7;
+ int sp = 0;
+
+ struct
+ {
+ _Tp *lb;
+ _Tp *ub;
+ } stack[48];
+
+ size_t total = vec.size();
+
+ if( total <= 1 )
+ return;
+
+ _Tp* arr = &vec[0];
+ stack[0].lb = arr;
+ stack[0].ub = arr + (total - 1);
+
+ while( sp >= 0 )
+ {
+ _Tp* left = stack[sp].lb;
+ _Tp* right = stack[sp--].ub;
+
+ for(;;)
+ {
+ int i, n = (int)(right - left) + 1, m;
+ _Tp* ptr;
+ _Tp* ptr2;
+
+ if( n <= isort_thresh )
+ {
+ insert_sort:
+ for( ptr = left + 1; ptr <= right; ptr++ )
+ {
+ for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--)
+ std::swap( ptr2[0], ptr2[-1] );
+ }
+ break;
+ }
+ else
+ {
+ _Tp* left0;
+ _Tp* left1;
+ _Tp* right0;
+ _Tp* right1;
+ _Tp* pivot;
+ _Tp* a;
+ _Tp* b;
+ _Tp* c;
+ int swap_cnt = 0;
+
+ left0 = left;
+ right0 = right;
+ pivot = left + (n/2);
+
+ if( n > 40 )
+ {
+ int d = n / 8;
+ a = left, b = left + d, c = left + 2*d;
+ left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a))
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c));
+
+ a = pivot - d, b = pivot, c = pivot + d;
+ pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a))
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c));
+
+ a = right - 2*d, b = right - d, c = right;
+ right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a))
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c));
+ }
+
+ a = left, b = pivot, c = right;
+ pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a))
+ : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c));
+ if( pivot != left0 )
+ {
+ std::swap( *pivot, *left0 );
+ pivot = left0;
+ }
+ left = left1 = left0 + 1;
+ right = right1 = right0;
+
+ for(;;)
+ {
+ while( left <= right && !LT(*pivot, *left) )
+ {
+ if( !LT(*left, *pivot) )
+ {
+ if( left > left1 )
+ std::swap( *left1, *left );
+ swap_cnt = 1;
+ left1++;
+ }
+ left++;
+ }
+
+ while( left <= right && !LT(*right, *pivot) )
+ {
+ if( !LT(*pivot, *right) )
+ {
+ if( right < right1 )
+ std::swap( *right1, *right );
+ swap_cnt = 1;
+ right1--;
+ }
+ right--;
+ }
+
+ if( left > right )
+ break;
+ std::swap( *left, *right );
+ swap_cnt = 1;
+ left++;
+ right--;
+ }
+
+ if( swap_cnt == 0 )
+ {
+ left = left0, right = right0;
+ goto insert_sort;
+ }
+
+ n = std::min( (int)(left1 - left0), (int)(left - left1) );
+ for( i = 0; i < n; i++ )
+ std::swap( left0[i], left[i-n] );
+
+ n = std::min( (int)(right0 - right1), (int)(right1 - right) );
+ for( i = 0; i < n; i++ )
+ std::swap( left[i], right0[i-n+1] );
+ n = (int)(left - left1);
+ m = (int)(right1 - right);
+ if( n > 1 )
+ {
+ if( m > 1 )
+ {
+ if( n > m )
+ {
+ stack[++sp].lb = left0;
+ stack[sp].ub = left0 + n - 1;
+ left = right0 - m + 1, right = right0;
+ }
+ else
+ {
+ stack[++sp].lb = right0 - m + 1;
+ stack[sp].ub = right0;
+ left = left0, right = left0 + n - 1;
+ }
+ }
+ else
+ left = left0, right = left0 + n - 1;
+ }
+ else if( m > 1 )
+ left = right0 - m + 1, right = right0;
+ else
+ break;
+ }
+ }
+ }
+}
+
+template<typename _Tp> class LessThan
+{
+public:
+ bool operator()(const _Tp& a, const _Tp& b) const { return a < b; }
+};
+
+template<typename _Tp> class GreaterEq
+{
+public:
+ bool operator()(const _Tp& a, const _Tp& b) const { return a >= b; }
+};
+
+template<typename _Tp> class LessThanIdx
+{
+public:
+ LessThanIdx( const _Tp* _arr ) : arr(_arr) {}
+ bool operator()(int a, int b) const { return arr[a] < arr[b]; }
+ const _Tp* arr;
+};
+
+template<typename _Tp> class GreaterEqIdx
+{
+public:
+ GreaterEqIdx( const _Tp* _arr ) : arr(_arr) {}
+ bool operator()(int a, int b) const { return arr[a] >= arr[b]; }
+ const _Tp* arr;
+};
+
+
+// This function splits the input sequence or set into one or more equivalence classes and
+// returns the vector of labels - 0-based class indexes for each element.
+// predicate(a,b) returns true if the two sequence elements certainly belong to the same class.
+//
+// The algorithm is described in "Introduction to Algorithms"
+// by Cormen, Leiserson and Rivest, the chapter "Data structures for disjoint sets"
+template<typename _Tp, class _EqPredicate> int
+partition( const vector<_Tp>& _vec, vector<int>& labels,
+ _EqPredicate predicate=_EqPredicate())
+{
+ int i, j, N = (int)_vec.size();
+ const _Tp* vec = &_vec[0];
+
+ const int PARENT=0;
+ const int RANK=1;
+
+ vector<int> _nodes(N*2);
+ int (*nodes)[2] = (int(*)[2])&_nodes[0];
+
+ // The first O(N) pass: create N single-vertex trees
+ for(i = 0; i < N; i++)
+ {
+ nodes[i][PARENT]=-1;
+ nodes[i][RANK] = 0;
+ }
+
+ // The main O(N^2) pass: merge connected components
+ for( i = 0; i < N; i++ )
+ {
+ int root = i;
+
+ // find root
+ while( nodes[root][PARENT] >= 0 )
+ root = nodes[root][PARENT];
+
+ for( j = 0; j < N; j++ )
+ {
+ if( i == j || !predicate(vec[i], vec[j]))
+ continue;
+ int root2 = j;
+
+ while( nodes[root2][PARENT] >= 0 )
+ root2 = nodes[root2][PARENT];
+
+ if( root2 != root )
+ {
+ // unite both trees
+ int rank = nodes[root][RANK], rank2 = nodes[root2][RANK];
+ if( rank > rank2 )
+ nodes[root2][PARENT] = root;
+ else
+ {
+ nodes[root][PARENT] = root2;
+ nodes[root2][RANK] += rank == rank2;
+ root = root2;
+ }
+ assert( nodes[root][PARENT] < 0 );
+
+ int k = j, parent;
+
+ // compress the path from node2 to root
+ while( (parent = nodes[k][PARENT]) >= 0 )
+ {
+ nodes[k][PARENT] = root;
+ k = parent;
+ }
+
+ // compress the path from node to root
+ k = i;
+ while( (parent = nodes[k][PARENT]) >= 0 )
+ {
+ nodes[k][PARENT] = root;
+ k = parent;
+ }
+ }
+ }
+ }
+
+ // Final O(N) pass: enumerate classes
+ labels.resize(N);
+ int nclasses = 0;
+
+ for( i = 0; i < N; i++ )
+ {
+ int root = i;
+ while( nodes[root][PARENT] >= 0 )
+ root = nodes[root][PARENT];
+ // re-use the rank as the class label
+ if( nodes[root][RANK] >= 0 )
+ nodes[root][RANK] = ~nclasses++;
+ labels[i] = ~nodes[root][RANK];
+ }
+
+ return nclasses;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+
+// bridge C++ => C Seq API
+CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0);
+CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0);
+CV_EXPORTS void seqPop( CvSeq* seq, void* element=0);
+CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0);
+CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements,
+ int count, int in_front=0 );
+CV_EXPORTS void seqRemove( CvSeq* seq, int index );
+CV_EXPORTS void clearSeq( CvSeq* seq );
+CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index );
+CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice );
+CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );
+
+template<typename _Tp> inline Seq<_Tp>::Seq() : seq(0) {}
+template<typename _Tp> inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq)
+{
+ CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp));
+}
+
+template<typename _Tp> inline Seq<_Tp>::Seq( MemStorage& storage,
+ int headerSize )
+{
+ CV_Assert(headerSize >= (int)sizeof(CvSeq));
+ seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage);
+}
+
+template<typename _Tp> inline _Tp& Seq<_Tp>::operator [](int idx)
+{ return *(_Tp*)getSeqElem(seq, idx); }
+
+template<typename _Tp> inline const _Tp& Seq<_Tp>::operator [](int idx) const
+{ return *(_Tp*)getSeqElem(seq, idx); }
+
+template<typename _Tp> inline SeqIterator<_Tp> Seq<_Tp>::begin() const
+{ return SeqIterator<_Tp>(*this); }
+
+template<typename _Tp> inline SeqIterator<_Tp> Seq<_Tp>::end() const
+{ return SeqIterator<_Tp>(*this, true); }
+
+template<typename _Tp> inline size_t Seq<_Tp>::size() const
+{ return seq ? seq->total : 0; }
+
+template<typename _Tp> inline int Seq<_Tp>::type() const
+{ return seq ? CV_MAT_TYPE(seq->flags) : 0; }
+
+template<typename _Tp> inline int Seq<_Tp>::depth() const
+{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; }
+
+template<typename _Tp> inline int Seq<_Tp>::channels() const
+{ return seq ? CV_MAT_CN(seq->flags) : 0; }
+
+template<typename _Tp> inline size_t Seq<_Tp>::elemSize() const
+{ return seq ? seq->elem_size : 0; }
+
+template<typename _Tp> inline size_t Seq<_Tp>::index(const _Tp& elem) const
+{ return cvSeqElemIdx(seq, &elem); }
+
+template<typename _Tp> inline void Seq<_Tp>::push_back(const _Tp& elem)
+{ cvSeqPush(seq, &elem); }
+
+template<typename _Tp> inline void Seq<_Tp>::push_front(const _Tp& elem)
+{ cvSeqPushFront(seq, &elem); }
+
+template<typename _Tp> inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count)
+{ cvSeqPushMulti(seq, elem, (int)count, 0); }
+
+template<typename _Tp> inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count)
+{ cvSeqPushMulti(seq, elem, (int)count, 1); }
+
+template<typename _Tp> inline _Tp& Seq<_Tp>::back()
+{ return *(_Tp*)getSeqElem(seq, -1); }
+
+template<typename _Tp> inline const _Tp& Seq<_Tp>::back() const
+{ return *(const _Tp*)getSeqElem(seq, -1); }
+
+template<typename _Tp> inline _Tp& Seq<_Tp>::front()
+{ return *(_Tp*)getSeqElem(seq, 0); }
+
+template<typename _Tp> inline const _Tp& Seq<_Tp>::front() const
+{ return *(const _Tp*)getSeqElem(seq, 0); }
+
+template<typename _Tp> inline bool Seq<_Tp>::empty() const
+{ return !seq || seq->total == 0; }
+
+template<typename _Tp> inline void Seq<_Tp>::clear()
+{ if(seq) clearSeq(seq); }
+
+template<typename _Tp> inline void Seq<_Tp>::pop_back()
+{ seqPop(seq); }
+
+template<typename _Tp> inline void Seq<_Tp>::pop_front()
+{ seqPopFront(seq); }
+
+template<typename _Tp> inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count)
+{ seqPopMulti(seq, elem, (int)count, 0); }
+
+template<typename _Tp> inline void Seq<_Tp>::pop_front(_Tp* elem, size_t count)
+{ seqPopMulti(seq, elem, (int)count, 1); }
+
+template<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp& elem)
+{ seqInsert(seq, idx, &elem); }
+
+template<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count)
+{
+ CvMat m = cvMat(1, count, DataType<_Tp>::type, elems);
+ seqInsertSlice(seq, idx, &m);
+}
+
+template<typename _Tp> inline void Seq<_Tp>::remove(int idx)
+{ seqRemove(seq, idx); }
+
+template<typename _Tp> inline void Seq<_Tp>::remove(const Range& r)
+{ seqRemoveSlice(seq, r); }
+
+template<typename _Tp> inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const
+{
+ size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start;
+ vec.resize(len);
+ if( seq && len )
+ cvCvtSeqToArray(seq, &vec[0], range);
+}
+
+template<typename _Tp> inline Seq<_Tp>::operator vector<_Tp>() const
+{
+ vector<_Tp> vec;
+ copyTo(vec);
+ return vec;
+}
+
+template<typename _Tp> inline SeqIterator<_Tp>::SeqIterator()
+{ memset(this, 0, sizeof(*this)); }
+
+template<typename _Tp> inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& _seq, bool seekEnd)
+{
+ cvStartReadSeq(_seq.seq, this);
+ index = seekEnd ? _seq.seq->total : 0;
+}
+
+template<typename _Tp> inline void SeqIterator<_Tp>::seek(size_t pos)
+{
+ cvSetSeqReaderPos(this, (int)pos, false);
+ index = pos;
+}
+
+template<typename _Tp> inline size_t SeqIterator<_Tp>::tell() const
+{ return index; }
+
+template<typename _Tp> inline _Tp& SeqIterator<_Tp>::operator *()
+{ return *(_Tp*)ptr; }
+
+template<typename _Tp> inline const _Tp& SeqIterator<_Tp>::operator *() const
+{ return *(const _Tp*)ptr; }
+
+template<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++()
+{
+ CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this);
+ if( ++index >= seq->total*2 )
+ index = 0;
+ return *this;
+}
+
+template<typename _Tp> inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const
+{
+ SeqIterator<_Tp> it = *this;
+ ++*this;
+ return it;
+}
+
+template<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --()
+{
+ CV_PREV_SEQ_ELEM(sizeof(_Tp), *this);
+ if( --index < 0 )
+ index = seq->total*2-1;
+ return *this;
+}
+
+template<typename _Tp> inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const
+{
+ SeqIterator<_Tp> it = *this;
+ --*this;
+ return it;
+}
+
+template<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta)
+{
+ cvSetSeqReaderPos(this, delta, 1);
+ index += delta;
+ int n = seq->total*2;
+ if( index < 0 )
+ index += n;
+ if( index >= n )
+ index -= n;
+ return *this;
+}
+
+template<typename _Tp> inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta)
+{
+ return (*this += -delta);
+}
+
+template<typename _Tp> inline ptrdiff_t operator - (const SeqIterator<_Tp>& a,
+ const SeqIterator<_Tp>& b)
+{
+ ptrdiff_t delta = a.index - b.index, n = a.seq->total;
+#if defined(__QNX__)
+ // No long std::abs(long) in QNX
+ long absdelta = (delta < 0) ? -delta : delta;
+ if( absdelta > n )
+#else
+ if( std::abs(static_cast<long>(delta)) > n )
+#endif
+ delta += delta < 0 ? n : -n;
+
+ return delta;
+}
+
+template<typename _Tp> inline bool operator == (const SeqIterator<_Tp>& a,
+ const SeqIterator<_Tp>& b)
+{
+ return a.seq == b.seq && a.index == b.index;
+}
+
+template<typename _Tp> inline bool operator != (const SeqIterator<_Tp>& a,
+ const SeqIterator<_Tp>& b)
+{
+ return !(a == b);
+}
+
+
+template<typename _ClsName> struct RTTIImpl
+{
+public:
+ static int isInstance(const void* ptr)
+ {
+ static _ClsName dummy;
+ static void* dummyp = &dummy;
+ union
+ {
+ const void* p;
+ const void** pp;
+ } a, b;
+ a.p = dummyp;
+ b.p = ptr;
+ return *a.pp == *b.pp;
+ }
+ static void release(void** dbptr)
+ {
+ if(dbptr && *dbptr)
+ {
+ delete (_ClsName*)*dbptr;
+ *dbptr = 0;
+ }
+ }
+ static void* read(CvFileStorage* fs, CvFileNode* n)
+ {
+ FileNode fn(fs, n);
+ _ClsName* obj = new _ClsName;
+ if(obj->read(fn))
+ return obj;
+ delete obj;
+ return 0;
+ }
+
+ static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList)
+ {
+ if(ptr && _fs)
+ {
+ FileStorage fs(_fs);
+ fs.fs.addref();
+ ((const _ClsName*)ptr)->write(fs, string(name));
+ }
+ }
+
+ static void* clone(const void* ptr)
+ {
+ if(!ptr)
+ return 0;
+ return new _ClsName(*(const _ClsName*)ptr);
+ }
+};
+
+
+class CV_EXPORTS Formatter
+{
+public:
+ virtual ~Formatter() {}
+ virtual void write(std::ostream& out, const Mat& m, const int* params=0, int nparams=0) const = 0;
+ virtual void write(std::ostream& out, const void* data, int nelems, int type,
+ const int* params=0, int nparams=0) const = 0;
+ static const Formatter* get(const char* fmt="");
+ static const Formatter* setDefault(const Formatter* fmt);
+};
+
+
+struct CV_EXPORTS Formatted
+{
+ Formatted(const Mat& m, const Formatter* fmt,
+ const vector<int>& params);
+ Formatted(const Mat& m, const Formatter* fmt,
+ const int* params=0);
+ Mat mtx;
+ const Formatter* fmt;
+ vector<int> params;
+};
+
+static inline Formatted format(const Mat& mtx, const char* fmt,
+ const vector<int>& params=vector<int>())
+{
+ return Formatted(mtx, Formatter::get(fmt), params);
+}
+
+template<typename _Tp> static inline Formatted format(const vector<Point_<_Tp> >& vec,
+ const char* fmt, const vector<int>& params=vector<int>())
+{
+ return Formatted(Mat(vec), Formatter::get(fmt), params);
+}
+
+template<typename _Tp> static inline Formatted format(const vector<Point3_<_Tp> >& vec,
+ const char* fmt, const vector<int>& params=vector<int>())
+{
+ return Formatted(Mat(vec), Formatter::get(fmt), params);
+}
+
+/** \brief prints Mat to the output stream in Matlab notation
+ * use like
+ @verbatim
+ Mat my_mat = Mat::eye(3,3,CV_32F);
+ std::cout << my_mat;
+ @endverbatim
+ */
+static inline std::ostream& operator << (std::ostream& out, const Mat& mtx)
+{
+ Formatter::get()->write(out, mtx);
+ return out;
+}
+
+/** \brief prints Mat to the output stream in the specified notation (see format)
+ * use like
+ @verbatim
+ Mat my_mat = Mat::eye(3,3,CV_32F);
+ std::cout << my_mat;
+ @endverbatim
+ */
+static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd)
+{
+ fmtd.fmt->write(out, fmtd.mtx);
+ return out;
+}
+
+
+template<typename _Tp> static inline std::ostream& operator << (std::ostream& out,
+ const vector<Point_<_Tp> >& vec)
+{
+ Formatter::get()->write(out, Mat(vec));
+ return out;
+}
+
+
+template<typename _Tp> static inline std::ostream& operator << (std::ostream& out,
+ const vector<Point3_<_Tp> >& vec)
+{
+ Formatter::get()->write(out, Mat(vec));
+ return out;
+}
+
+
+/** Writes a Matx to an output stream.
+ */
+template<typename _Tp, int m, int n> inline std::ostream& operator<<(std::ostream& out, const Matx<_Tp, m, n>& matx)
+{
+ out << cv::Mat(matx);
+ return out;
+}
+
+/** Writes a point to an output stream in Matlab notation
+ */
+template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p)
+{
+ out << "[" << p.x << ", " << p.y << "]";
+ return out;
+}
+
+/** Writes a point to an output stream in Matlab notation
+ */
+template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p)
+{
+ out << "[" << p.x << ", " << p.y << ", " << p.z << "]";
+ return out;
+}
+
+/** Writes a Vec to an output stream. Format example : [10, 20, 30]
+ */
+template<typename _Tp, int n> inline std::ostream& operator<<(std::ostream& out, const Vec<_Tp, n>& vec)
+{
+ out << "[";
+
+ if(Vec<_Tp, n>::depth < CV_32F)
+ {
+ for (int i = 0; i < n - 1; ++i) {
+ out << (int)vec[i] << ", ";
+ }
+ out << (int)vec[n-1] << "]";
+ }
+ else
+ {
+ for (int i = 0; i < n - 1; ++i) {
+ out << vec[i] << ", ";
+ }
+ out << vec[n-1] << "]";
+ }
+
+ return out;
+}
+
+/** Writes a Size_ to an output stream. Format example : [640 x 480]
+ */
+template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const Size_<_Tp>& size)
+{
+ out << "[" << size.width << " x " << size.height << "]";
+ return out;
+}
+
+/** Writes a Rect_ to an output stream. Format example : [640 x 480 from (10, 20)]
+ */
+template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const Rect_<_Tp>& rect)
+{
+ out << "[" << rect.width << " x " << rect.height << " from (" << rect.x << ", " << rect.y << ")]";
+ return out;
+}
+
+
+template<typename _Tp> inline Ptr<_Tp> Algorithm::create(const string& name)
+{
+ return _create(name).ptr<_Tp>();
+}
+
+template<typename _Tp>
+inline void Algorithm::set(const char* _name, const Ptr<_Tp>& value)
+{
+ Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
+ if (algo_ptr.empty()) {
+ CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
+ }
+ info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
+}
+
+template<typename _Tp>
+inline void Algorithm::set(const string& _name, const Ptr<_Tp>& value)
+{
+ this->set<_Tp>(_name.c_str(), value);
+}
+
+template<typename _Tp>
+inline void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value)
+{
+ Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
+ if (algo_ptr.empty()) {
+ CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
+ }
+ info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
+}
+
+template<typename _Tp>
+inline void Algorithm::setAlgorithm(const string& _name, const Ptr<_Tp>& value)
+{
+ this->set<_Tp>(_name.c_str(), value);
+}
+
+template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::get(const string& _name) const
+{
+ typename ParamType<_Tp>::member_type value;
+ info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value);
+ return value;
+}
+
+template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const
+{
+ typename ParamType<_Tp>::member_type value;
+ info()->get(this, _name, ParamType<_Tp>::type, &value);
+ return value;
+}
+
+template<typename _Tp, typename _Base> inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
+ const string& help)
+{
+ //TODO: static assert: _Tp inherits from _Base
+ addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly,
+ (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
+}
+
+template<typename _Tp> inline void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ Ptr<_Tp>& value, bool readOnly, Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
+ const string& help)
+{
+ //TODO: static assert: _Tp inherits from Algorithm
+ addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
+ (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
+}
+
+}
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+#endif // __cplusplus
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h
new file mode 100644
index 00000000..c21cd2c7
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/types_c.h
@@ -0,0 +1,1923 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_TYPES_H__
+#define __OPENCV_CORE_TYPES_H__
+
+#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER
+# if _MSC_VER > 1300
+# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */
+# endif
+#endif
+
+
+#ifndef SKIP_INCLUDES
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <float.h>
+
+#if !defined _MSC_VER && !defined __BORLANDC__
+# include <stdint.h>
+#endif
+
+#if defined __ICL
+# define CV_ICC __ICL
+#elif defined __ICC
+# define CV_ICC __ICC
+#elif defined __ECL
+# define CV_ICC __ECL
+#elif defined __ECC
+# define CV_ICC __ECC
+#elif defined __INTEL_COMPILER
+# define CV_ICC __INTEL_COMPILER
+#endif
+
+#if defined CV_ICC && !defined CV_ENABLE_UNROLLED
+# define CV_ENABLE_UNROLLED 0
+#else
+# define CV_ENABLE_UNROLLED 1
+#endif
+
+#if (defined _M_X64 && defined _MSC_VER && _MSC_VER >= 1400) || (__GNUC__ >= 4 && defined __x86_64__)
+# if defined WIN32
+# include <intrin.h>
+# endif
+# if defined __SSE2__ || !defined __GNUC__
+# include <emmintrin.h>
+# endif
+#endif
+
+#if defined __BORLANDC__
+# include <fastmath.h>
+#else
+# include <math.h>
+#endif
+
+#ifdef HAVE_IPL
+# ifndef __IPL_H__
+# if defined WIN32 || defined _WIN32
+# include <ipl.h>
+# else
+# include <ipl/ipl.h>
+# endif
+# endif
+#elif defined __IPL_H__
+# define HAVE_IPL
+#endif
+
+#endif // SKIP_INCLUDES
+
+#if defined WIN32 || defined _WIN32
+# define CV_CDECL __cdecl
+# define CV_STDCALL __stdcall
+#else
+# define CV_CDECL
+# define CV_STDCALL
+#endif
+
+#ifndef CV_EXTERN_C
+# ifdef __cplusplus
+# define CV_EXTERN_C extern "C"
+# define CV_DEFAULT(val) = val
+# else
+# define CV_EXTERN_C
+# define CV_DEFAULT(val)
+# endif
+#endif
+
+#ifndef CV_EXTERN_C_FUNCPTR
+# ifdef __cplusplus
+# define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; }
+# else
+# define CV_EXTERN_C_FUNCPTR(x) typedef x
+# endif
+#endif
+
+#ifndef CV_INLINE
+# if defined __cplusplus
+# define CV_INLINE inline
+# elif defined _MSC_VER
+# define CV_INLINE __inline
+# else
+# define CV_INLINE static
+# endif
+#endif /* CV_INLINE */
+
+#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
+# define CV_EXPORTS __declspec(dllexport)
+#else
+# define CV_EXPORTS
+#endif
+
+#ifndef CVAPI
+# define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL
+#endif
+
+#if defined _MSC_VER || defined __BORLANDC__
+ typedef __int64 int64;
+ typedef unsigned __int64 uint64;
+# define CV_BIG_INT(n) n##I64
+# define CV_BIG_UINT(n) n##UI64
+#else
+ typedef int64_t int64;
+ typedef uint64_t uint64;
+# define CV_BIG_INT(n) n##LL
+# define CV_BIG_UINT(n) n##ULL
+#endif
+
+#ifndef HAVE_IPL
+ typedef unsigned char uchar;
+ typedef unsigned short ushort;
+#endif
+
+typedef signed char schar;
+
+/* special informative macros for wrapper generators */
+#define CV_CARRAY(counter)
+#define CV_CUSTOM_CARRAY(args)
+#define CV_EXPORTS_W CV_EXPORTS
+#define CV_EXPORTS_W_SIMPLE CV_EXPORTS
+#define CV_EXPORTS_AS(synonym) CV_EXPORTS
+#define CV_EXPORTS_W_MAP CV_EXPORTS
+#define CV_IN_OUT
+#define CV_OUT
+#define CV_PROP
+#define CV_PROP_RW
+#define CV_WRAP
+#define CV_WRAP_AS(synonym)
+#define CV_WRAP_DEFAULT(value)
+
+/* CvArr* is used to pass arbitrary
+ * array-like data structures
+ * into functions where the particular
+ * array type is recognized at runtime:
+ */
+typedef void CvArr;
+
+typedef union Cv32suf
+{
+ int i;
+ unsigned u;
+ float f;
+}
+Cv32suf;
+
+typedef union Cv64suf
+{
+ int64 i;
+ uint64 u;
+ double f;
+}
+Cv64suf;
+
+typedef int CVStatus;
+
+enum {
+ CV_StsOk= 0, /* everything is ok */
+ CV_StsBackTrace= -1, /* pseudo error for back trace */
+ CV_StsError= -2, /* unknown/unspecified error */
+ CV_StsInternal= -3, /* internal error (bad state) */
+ CV_StsNoMem= -4, /* insufficient memory */
+ CV_StsBadArg= -5, /* function arg/param is bad */
+ CV_StsBadFunc= -6, /* unsupported function */
+ CV_StsNoConv= -7, /* iter. didn't converge */
+ CV_StsAutoTrace= -8, /* tracing */
+ CV_HeaderIsNull= -9, /* image header is NULL */
+ CV_BadImageSize= -10, /* image size is invalid */
+ CV_BadOffset= -11, /* offset is invalid */
+ CV_BadDataPtr= -12, /**/
+ CV_BadStep= -13, /**/
+ CV_BadModelOrChSeq= -14, /**/
+ CV_BadNumChannels= -15, /**/
+ CV_BadNumChannel1U= -16, /**/
+ CV_BadDepth= -17, /**/
+ CV_BadAlphaChannel= -18, /**/
+ CV_BadOrder= -19, /**/
+ CV_BadOrigin= -20, /**/
+ CV_BadAlign= -21, /**/
+ CV_BadCallBack= -22, /**/
+ CV_BadTileSize= -23, /**/
+ CV_BadCOI= -24, /**/
+ CV_BadROISize= -25, /**/
+ CV_MaskIsTiled= -26, /**/
+ CV_StsNullPtr= -27, /* null pointer */
+ CV_StsVecLengthErr= -28, /* incorrect vector length */
+ CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */
+ CV_StsKernelStructContentErr= -30, /* incorr. transform kernel content */
+ CV_StsFilterOffsetErr= -31, /* incorrect filter offset value */
+ CV_StsBadSize= -201, /* the input/output structure size is incorrect */
+ CV_StsDivByZero= -202, /* division by zero */
+ CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */
+ CV_StsObjectNotFound= -204, /* request can't be completed */
+ CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */
+ CV_StsBadFlag= -206, /* flag is wrong or not supported */
+ CV_StsBadPoint= -207, /* bad CvPoint */
+ CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/
+ CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */
+ CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/
+ CV_StsOutOfRange= -211, /* some of parameters are out of range */
+ CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */
+ CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */
+ CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */
+ CV_StsAssert= -215, /* assertion failed */
+ CV_GpuNotSupported= -216,
+ CV_GpuApiCallError= -217,
+ CV_OpenGlNotSupported= -218,
+ CV_OpenGlApiCallError= -219,
+ CV_OpenCLDoubleNotSupported= -220,
+ CV_OpenCLInitError= -221,
+ CV_OpenCLNoAMDBlasFft= -222
+};
+
+/****************************************************************************************\
+* Common macros and inline functions *
+\****************************************************************************************/
+
+#ifdef HAVE_TEGRA_OPTIMIZATION
+# include "tegra_round.hpp"
+#endif
+
+#define CV_PI 3.1415926535897932384626433832795
+#define CV_LOG2 0.69314718055994530941723212145818
+
+#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t))
+
+#ifndef MIN
+# define MIN(a,b) ((a) > (b) ? (b) : (a))
+#endif
+
+#ifndef MAX
+# define MAX(a,b) ((a) < (b) ? (b) : (a))
+#endif
+
+/* min & max without jumps */
+#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1)))
+
+#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1)))
+
+/* absolute value without jumps */
+#ifndef __cplusplus
+# define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))
+#else
+# define CV_IABS(a) abs(a)
+#endif
+#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b)))
+#define CV_SIGN(a) CV_CMP((a),0)
+
+#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__)
+# define CV_VFP 1
+#else
+# define CV_VFP 0
+#endif
+
+
+#if CV_VFP
+// 1. general scheme
+#define ARM_ROUND(_value, _asm_string) \
+ int res; \
+ float temp; \
+ (void)temp; \
+ asm(_asm_string : [res] "=r" (res), [temp] "=w" (temp) : [value] "w" (_value)); \
+ return res;
+// 2. version for double
+#ifdef __clang__
+#define ARM_ROUND_DBL(value) ARM_ROUND(value, "vcvtr.s32.f64 %[temp], %[value] \n vmov %[res], %[temp]")
+#else
+#define ARM_ROUND_DBL(value) ARM_ROUND(value, "vcvtr.s32.f64 %[temp], %P[value] \n vmov %[res], %[temp]")
+#endif
+// 3. version for float
+#define ARM_ROUND_FLT(value) ARM_ROUND(value, "vcvtr.s32.f32 %[temp], %[value]\n vmov %[res], %[temp]")
+#endif // CV_VFP
+
+CV_INLINE int cvRound( double value )
+{
+#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && defined __SSE2__ && !defined __APPLE__)
+ __m128d t = _mm_set_sd( value );
+ return _mm_cvtsd_si32(t);
+#elif defined _MSC_VER && defined _M_IX86
+ int t;
+ __asm
+ {
+ fld value;
+ fistp t;
+ }
+ return t;
+#elif defined _MSC_VER && defined _M_ARM && defined HAVE_TEGRA_OPTIMIZATION
+ TEGRA_ROUND(value);
+#elif defined CV_ICC || defined __GNUC__
+# ifdef HAVE_TEGRA_OPTIMIZATION
+ TEGRA_ROUND(value);
+# elif CV_VFP
+ ARM_ROUND_DBL(value)
+# else
+ return (int)lrint(value);
+# endif
+#else
+ double intpart, fractpart;
+ fractpart = modf(value, &intpart);
+ if ((fabs(fractpart) != 0.5) || ((((int)intpart) % 2) != 0))
+ return (int)(value + (value >= 0 ? 0.5 : -0.5));
+ else
+ return (int)intpart;
+#endif
+}
+
+#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
+# include "emmintrin.h"
+#endif
+
+CV_INLINE int cvFloor( double value )
+{
+#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)
+ __m128d t = _mm_set_sd( value );
+ int i = _mm_cvtsd_si32(t);
+ return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i)));
+#elif defined __GNUC__
+ int i = (int)value;
+ return i - (i > value);
+#else
+ int i = cvRound(value);
+ float diff = (float)(value - i);
+ return i - (diff < 0);
+#endif
+}
+
+
+CV_INLINE int cvCeil( double value )
+{
+#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__&& !defined __APPLE__)
+ __m128d t = _mm_set_sd( value );
+ int i = _mm_cvtsd_si32(t);
+ return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t));
+#elif defined __GNUC__
+ int i = (int)value;
+ return i + (i < value);
+#else
+ int i = cvRound(value);
+ float diff = (float)(i - value);
+ return i + (diff < 0);
+#endif
+}
+
+#define cvInvSqrt(value) ((float)(1./sqrt(value)))
+#define cvSqrt(value) ((float)sqrt(value))
+
+CV_INLINE int cvIsNaN( double value )
+{
+ Cv64suf ieee754;
+ ieee754.f = value;
+ return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) +
+ ((unsigned)ieee754.u != 0) > 0x7ff00000;
+}
+
+
+CV_INLINE int cvIsInf( double value )
+{
+ Cv64suf ieee754;
+ ieee754.f = value;
+ return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 &&
+ (unsigned)ieee754.u == 0;
+}
+
+
+/*************** Random number generation *******************/
+
+typedef uint64 CvRNG;
+
+#define CV_RNG_COEFF 4164903690U
+
+CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1))
+{
+ CvRNG rng = seed ? (uint64)seed : (uint64)(int64)-1;
+ return rng;
+}
+
+/* Return random 32-bit unsigned integer: */
+CV_INLINE unsigned cvRandInt( CvRNG* rng )
+{
+ uint64 temp = *rng;
+ temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32);
+ *rng = temp;
+ return (unsigned)temp;
+}
+
+/* Returns random floating-point number between 0 and 1: */
+CV_INLINE double cvRandReal( CvRNG* rng )
+{
+ return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */;
+}
+
+/****************************************************************************************\
+* Image type (IplImage) *
+\****************************************************************************************/
+
+#ifndef HAVE_IPL
+
+/*
+ * The following definitions (until #endif)
+ * are an extract from IPL headers.
+ * Copyright (c) 1995 Intel Corporation.
+ */
+#define IPL_DEPTH_SIGN 0x80000000
+
+#define IPL_DEPTH_1U 1
+#define IPL_DEPTH_8U 8
+#define IPL_DEPTH_16U 16
+#define IPL_DEPTH_32F 32
+
+#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8)
+#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16)
+#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32)
+
+#define IPL_DATA_ORDER_PIXEL 0
+#define IPL_DATA_ORDER_PLANE 1
+
+#define IPL_ORIGIN_TL 0
+#define IPL_ORIGIN_BL 1
+
+#define IPL_ALIGN_4BYTES 4
+#define IPL_ALIGN_8BYTES 8
+#define IPL_ALIGN_16BYTES 16
+#define IPL_ALIGN_32BYTES 32
+
+#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES
+#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES
+
+#define IPL_BORDER_CONSTANT 0
+#define IPL_BORDER_REPLICATE 1
+#define IPL_BORDER_REFLECT 2
+#define IPL_BORDER_WRAP 3
+
+typedef struct _IplImage
+{
+ int nSize; /* sizeof(IplImage) */
+ int ID; /* version (=0)*/
+ int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */
+ int alphaChannel; /* Ignored by OpenCV */
+ int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S,
+ IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. */
+ char colorModel[4]; /* Ignored by OpenCV */
+ char channelSeq[4]; /* ditto */
+ int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels.
+ cvCreateImage can only create interleaved images */
+ int origin; /* 0 - top-left origin,
+ 1 - bottom-left origin (Windows bitmaps style). */
+ int align; /* Alignment of image rows (4 or 8).
+ OpenCV ignores it and uses widthStep instead. */
+ int width; /* Image width in pixels. */
+ int height; /* Image height in pixels. */
+ struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */
+ struct _IplImage *maskROI; /* Must be NULL. */
+ void *imageId; /* " " */
+ struct _IplTileInfo *tileInfo; /* " " */
+ int imageSize; /* Image data size in bytes
+ (==image->height*image->widthStep
+ in case of interleaved data)*/
+ char *imageData; /* Pointer to aligned image data. */
+ int widthStep; /* Size of aligned image row in bytes. */
+ int BorderMode[4]; /* Ignored by OpenCV. */
+ int BorderConst[4]; /* Ditto. */
+ char *imageDataOrigin; /* Pointer to very origin of image data
+ (not necessarily aligned) -
+ needed for correct deallocation */
+}
+IplImage;
+
+typedef struct _IplTileInfo IplTileInfo;
+
+typedef struct _IplROI
+{
+ int coi; /* 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/
+ int xOffset;
+ int yOffset;
+ int width;
+ int height;
+}
+IplROI;
+
+typedef struct _IplConvKernel
+{
+ int nCols;
+ int nRows;
+ int anchorX;
+ int anchorY;
+ int *values;
+ int nShiftR;
+}
+IplConvKernel;
+
+typedef struct _IplConvKernelFP
+{
+ int nCols;
+ int nRows;
+ int anchorX;
+ int anchorY;
+ float *values;
+}
+IplConvKernelFP;
+
+#define IPL_IMAGE_HEADER 1
+#define IPL_IMAGE_DATA 2
+#define IPL_IMAGE_ROI 4
+
+#endif/*HAVE_IPL*/
+
+/* extra border mode */
+#define IPL_BORDER_REFLECT_101 4
+#define IPL_BORDER_TRANSPARENT 5
+
+#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage))
+#define CV_TYPE_NAME_IMAGE "opencv-image"
+
+#define CV_IS_IMAGE_HDR(img) \
+ ((img) != NULL && ((const IplImage*)(img))->nSize == sizeof(IplImage))
+
+#define CV_IS_IMAGE(img) \
+ (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL)
+
+/* for storing double-precision
+ floating point data in IplImage's */
+#define IPL_DEPTH_64F 64
+
+/* get reference to pixel at (col,row),
+ for multi-channel images (col) should be multiplied by number of channels */
+#define CV_IMAGE_ELEM( image, elemtype, row, col ) \
+ (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)])
+
+/****************************************************************************************\
+* Matrix type (CvMat) *
+\****************************************************************************************/
+
+#define CV_CN_MAX 512
+#define CV_CN_SHIFT 3
+#define CV_DEPTH_MAX (1 << CV_CN_SHIFT)
+
+#define CV_8U 0
+#define CV_8S 1
+#define CV_16U 2
+#define CV_16S 3
+#define CV_32S 4
+#define CV_32F 5
+#define CV_64F 6
+#define CV_USRTYPE1 7
+
+#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1)
+#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK)
+
+#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT))
+#define CV_MAKE_TYPE CV_MAKETYPE
+
+#define CV_8UC1 CV_MAKETYPE(CV_8U,1)
+#define CV_8UC2 CV_MAKETYPE(CV_8U,2)
+#define CV_8UC3 CV_MAKETYPE(CV_8U,3)
+#define CV_8UC4 CV_MAKETYPE(CV_8U,4)
+#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n))
+
+#define CV_8SC1 CV_MAKETYPE(CV_8S,1)
+#define CV_8SC2 CV_MAKETYPE(CV_8S,2)
+#define CV_8SC3 CV_MAKETYPE(CV_8S,3)
+#define CV_8SC4 CV_MAKETYPE(CV_8S,4)
+#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n))
+
+#define CV_16UC1 CV_MAKETYPE(CV_16U,1)
+#define CV_16UC2 CV_MAKETYPE(CV_16U,2)
+#define CV_16UC3 CV_MAKETYPE(CV_16U,3)
+#define CV_16UC4 CV_MAKETYPE(CV_16U,4)
+#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n))
+
+#define CV_16SC1 CV_MAKETYPE(CV_16S,1)
+#define CV_16SC2 CV_MAKETYPE(CV_16S,2)
+#define CV_16SC3 CV_MAKETYPE(CV_16S,3)
+#define CV_16SC4 CV_MAKETYPE(CV_16S,4)
+#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n))
+
+#define CV_32SC1 CV_MAKETYPE(CV_32S,1)
+#define CV_32SC2 CV_MAKETYPE(CV_32S,2)
+#define CV_32SC3 CV_MAKETYPE(CV_32S,3)
+#define CV_32SC4 CV_MAKETYPE(CV_32S,4)
+#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n))
+
+#define CV_32FC1 CV_MAKETYPE(CV_32F,1)
+#define CV_32FC2 CV_MAKETYPE(CV_32F,2)
+#define CV_32FC3 CV_MAKETYPE(CV_32F,3)
+#define CV_32FC4 CV_MAKETYPE(CV_32F,4)
+#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n))
+
+#define CV_64FC1 CV_MAKETYPE(CV_64F,1)
+#define CV_64FC2 CV_MAKETYPE(CV_64F,2)
+#define CV_64FC3 CV_MAKETYPE(CV_64F,3)
+#define CV_64FC4 CV_MAKETYPE(CV_64F,4)
+#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n))
+
+#define CV_AUTO_STEP 0x7fffffff
+#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff )
+
+#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT)
+#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1)
+#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1)
+#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK)
+#define CV_MAT_CONT_FLAG_SHIFT 14
+#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT)
+#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG)
+#define CV_IS_CONT_MAT CV_IS_MAT_CONT
+#define CV_SUBMAT_FLAG_SHIFT 15
+#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT)
+#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG)
+
+#define CV_MAGIC_MASK 0xFFFF0000
+#define CV_MAT_MAGIC_VAL 0x42420000
+#define CV_TYPE_NAME_MAT "opencv-matrix"
+
+typedef struct CvMat
+{
+ int type;
+ int step;
+
+ /* for internal use only */
+ int* refcount;
+ int hdr_refcount;
+
+ union
+ {
+ uchar* ptr;
+ short* s;
+ int* i;
+ float* fl;
+ double* db;
+ } data;
+
+#ifdef __cplusplus
+ union
+ {
+ int rows;
+ int height;
+ };
+
+ union
+ {
+ int cols;
+ int width;
+ };
+#else
+ int rows;
+ int cols;
+#endif
+
+}
+CvMat;
+
+
+#define CV_IS_MAT_HDR(mat) \
+ ((mat) != NULL && \
+ (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \
+ ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0)
+
+#define CV_IS_MAT_HDR_Z(mat) \
+ ((mat) != NULL && \
+ (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \
+ ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0)
+
+#define CV_IS_MAT(mat) \
+ (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL)
+
+#define CV_IS_MASK_ARR(mat) \
+ (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0)
+
+#define CV_ARE_TYPES_EQ(mat1, mat2) \
+ ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0)
+
+#define CV_ARE_CNS_EQ(mat1, mat2) \
+ ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0)
+
+#define CV_ARE_DEPTHS_EQ(mat1, mat2) \
+ ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0)
+
+#define CV_ARE_SIZES_EQ(mat1, mat2) \
+ ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols)
+
+#define CV_IS_MAT_CONST(mat) \
+ (((mat)->rows|(mat)->cols) == 1)
+
+/* Size of each channel item,
+ 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */
+#define CV_ELEM_SIZE1(type) \
+ ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15)
+
+/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */
+#define CV_ELEM_SIZE(type) \
+ (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3))
+
+#define IPL2CV_DEPTH(depth) \
+ ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \
+ (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \
+ (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15)
+
+/* Inline constructor. No data is allocated internally!!!
+ * (Use together with cvCreateData, or use cvCreateMat instead to
+ * get a matrix with allocated data):
+ */
+CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL))
+{
+ CvMat m;
+
+ assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F );
+ type = CV_MAT_TYPE(type);
+ m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type;
+ m.cols = cols;
+ m.rows = rows;
+ m.step = m.cols*CV_ELEM_SIZE(type);
+ m.data.ptr = (uchar*)data;
+ m.refcount = NULL;
+ m.hdr_refcount = 0;
+
+ return m;
+}
+
+
+#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \
+ (assert( (unsigned)(row) < (unsigned)(mat).rows && \
+ (unsigned)(col) < (unsigned)(mat).cols ), \
+ (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col))
+
+#define CV_MAT_ELEM_PTR( mat, row, col ) \
+ CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) )
+
+#define CV_MAT_ELEM( mat, elemtype, row, col ) \
+ (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype)))
+
+
+CV_INLINE double cvmGet( const CvMat* mat, int row, int col )
+{
+ int type;
+
+ type = CV_MAT_TYPE(mat->type);
+ assert( (unsigned)row < (unsigned)mat->rows &&
+ (unsigned)col < (unsigned)mat->cols );
+
+ if( type == CV_32FC1 )
+ return ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col];
+ else
+ {
+ assert( type == CV_64FC1 );
+ return ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col];
+ }
+}
+
+
+CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value )
+{
+ int type;
+ type = CV_MAT_TYPE(mat->type);
+ assert( (unsigned)row < (unsigned)mat->rows &&
+ (unsigned)col < (unsigned)mat->cols );
+
+ if( type == CV_32FC1 )
+ ((float*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value;
+ else
+ {
+ assert( type == CV_64FC1 );
+ ((double*)(void*)(mat->data.ptr + (size_t)mat->step*row))[col] = value;
+ }
+}
+
+
+CV_INLINE int cvIplDepth( int type )
+{
+ int depth = CV_MAT_DEPTH(type);
+ return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S ||
+ depth == CV_32S ? IPL_DEPTH_SIGN : 0);
+}
+
+
+/****************************************************************************************\
+* Multi-dimensional dense array (CvMatND) *
+\****************************************************************************************/
+
+#define CV_MATND_MAGIC_VAL 0x42430000
+#define CV_TYPE_NAME_MATND "opencv-nd-matrix"
+
+#define CV_MAX_DIM 32
+#define CV_MAX_DIM_HEAP 1024
+
+typedef struct CvMatND
+{
+ int type;
+ int dims;
+
+ int* refcount;
+ int hdr_refcount;
+
+ union
+ {
+ uchar* ptr;
+ float* fl;
+ double* db;
+ int* i;
+ short* s;
+ } data;
+
+ struct
+ {
+ int size;
+ int step;
+ }
+ dim[CV_MAX_DIM];
+}
+CvMatND;
+
+#define CV_IS_MATND_HDR(mat) \
+ ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL)
+
+#define CV_IS_MATND(mat) \
+ (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL)
+
+
+/****************************************************************************************\
+* Multi-dimensional sparse array (CvSparseMat) *
+\****************************************************************************************/
+
+#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000
+#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix"
+
+struct CvSet;
+
+typedef struct CvSparseMat
+{
+ int type;
+ int dims;
+ int* refcount;
+ int hdr_refcount;
+
+ struct CvSet* heap;
+ void** hashtable;
+ int hashsize;
+ int valoffset;
+ int idxoffset;
+ int size[CV_MAX_DIM];
+}
+CvSparseMat;
+
+#define CV_IS_SPARSE_MAT_HDR(mat) \
+ ((mat) != NULL && \
+ (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL)
+
+#define CV_IS_SPARSE_MAT(mat) \
+ CV_IS_SPARSE_MAT_HDR(mat)
+
+/**************** iteration through a sparse array *****************/
+
+typedef struct CvSparseNode
+{
+ unsigned hashval;
+ struct CvSparseNode* next;
+}
+CvSparseNode;
+
+typedef struct CvSparseMatIterator
+{
+ CvSparseMat* mat;
+ CvSparseNode* node;
+ int curidx;
+}
+CvSparseMatIterator;
+
+#define CV_NODE_VAL(mat,node) ((void*)((uchar*)(node) + (mat)->valoffset))
+#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset))
+
+/****************************************************************************************\
+* Histogram *
+\****************************************************************************************/
+
+typedef int CvHistType;
+
+#define CV_HIST_MAGIC_VAL 0x42450000
+#define CV_HIST_UNIFORM_FLAG (1 << 10)
+
+/* indicates whether bin ranges are set already or not */
+#define CV_HIST_RANGES_FLAG (1 << 11)
+
+#define CV_HIST_ARRAY 0
+#define CV_HIST_SPARSE 1
+#define CV_HIST_TREE CV_HIST_SPARSE
+
+/* should be used as a parameter only,
+ it turns to CV_HIST_UNIFORM_FLAG of hist->type */
+#define CV_HIST_UNIFORM 1
+
+typedef struct CvHistogram
+{
+ int type;
+ CvArr* bins;
+ float thresh[CV_MAX_DIM][2]; /* For uniform histograms. */
+ float** thresh2; /* For non-uniform histograms. */
+ CvMatND mat; /* Embedded matrix header for array histograms. */
+}
+CvHistogram;
+
+#define CV_IS_HIST( hist ) \
+ ((hist) != NULL && \
+ (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \
+ (hist)->bins != NULL)
+
+#define CV_IS_UNIFORM_HIST( hist ) \
+ (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0)
+
+#define CV_IS_SPARSE_HIST( hist ) \
+ CV_IS_SPARSE_MAT((hist)->bins)
+
+#define CV_HIST_HAS_RANGES( hist ) \
+ (((hist)->type & CV_HIST_RANGES_FLAG) != 0)
+
+/****************************************************************************************\
+* Other supplementary data type definitions *
+\****************************************************************************************/
+
+/*************************************** CvRect *****************************************/
+
+typedef struct CvRect
+{
+ int x;
+ int y;
+ int width;
+ int height;
+}
+CvRect;
+
+CV_INLINE CvRect cvRect( int x, int y, int width, int height )
+{
+ CvRect r;
+
+ r.x = x;
+ r.y = y;
+ r.width = width;
+ r.height = height;
+
+ return r;
+}
+
+
+CV_INLINE IplROI cvRectToROI( CvRect rect, int coi )
+{
+ IplROI roi;
+ roi.xOffset = rect.x;
+ roi.yOffset = rect.y;
+ roi.width = rect.width;
+ roi.height = rect.height;
+ roi.coi = coi;
+
+ return roi;
+}
+
+
+CV_INLINE CvRect cvROIToRect( IplROI roi )
+{
+ return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height );
+}
+
+/*********************************** CvTermCriteria *************************************/
+
+#define CV_TERMCRIT_ITER 1
+#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER
+#define CV_TERMCRIT_EPS 2
+
+typedef struct CvTermCriteria
+{
+ int type; /* may be combination of
+ CV_TERMCRIT_ITER
+ CV_TERMCRIT_EPS */
+ int max_iter;
+ double epsilon;
+}
+CvTermCriteria;
+
+CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon )
+{
+ CvTermCriteria t;
+
+ t.type = type;
+ t.max_iter = max_iter;
+ t.epsilon = (float)epsilon;
+
+ return t;
+}
+
+
+/******************************* CvPoint and variants ***********************************/
+
+typedef struct CvPoint
+{
+ int x;
+ int y;
+}
+CvPoint;
+
+
+CV_INLINE CvPoint cvPoint( int x, int y )
+{
+ CvPoint p;
+
+ p.x = x;
+ p.y = y;
+
+ return p;
+}
+
+
+typedef struct CvPoint2D32f
+{
+ float x;
+ float y;
+}
+CvPoint2D32f;
+
+
+CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y )
+{
+ CvPoint2D32f p;
+
+ p.x = (float)x;
+ p.y = (float)y;
+
+ return p;
+}
+
+
+CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point )
+{
+ return cvPoint2D32f( (float)point.x, (float)point.y );
+}
+
+
+CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point )
+{
+ CvPoint ipt;
+ ipt.x = cvRound(point.x);
+ ipt.y = cvRound(point.y);
+
+ return ipt;
+}
+
+
+typedef struct CvPoint3D32f
+{
+ float x;
+ float y;
+ float z;
+}
+CvPoint3D32f;
+
+
+CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z )
+{
+ CvPoint3D32f p;
+
+ p.x = (float)x;
+ p.y = (float)y;
+ p.z = (float)z;
+
+ return p;
+}
+
+
+typedef struct CvPoint2D64f
+{
+ double x;
+ double y;
+}
+CvPoint2D64f;
+
+
+CV_INLINE CvPoint2D64f cvPoint2D64f( double x, double y )
+{
+ CvPoint2D64f p;
+
+ p.x = x;
+ p.y = y;
+
+ return p;
+}
+
+
+typedef struct CvPoint3D64f
+{
+ double x;
+ double y;
+ double z;
+}
+CvPoint3D64f;
+
+
+CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z )
+{
+ CvPoint3D64f p;
+
+ p.x = x;
+ p.y = y;
+ p.z = z;
+
+ return p;
+}
+
+
+/******************************** CvSize's & CvBox **************************************/
+
+typedef struct CvSize
+{
+ int width;
+ int height;
+}
+CvSize;
+
+CV_INLINE CvSize cvSize( int width, int height )
+{
+ CvSize s;
+
+ s.width = width;
+ s.height = height;
+
+ return s;
+}
+
+typedef struct CvSize2D32f
+{
+ float width;
+ float height;
+}
+CvSize2D32f;
+
+
+CV_INLINE CvSize2D32f cvSize2D32f( double width, double height )
+{
+ CvSize2D32f s;
+
+ s.width = (float)width;
+ s.height = (float)height;
+
+ return s;
+}
+
+typedef struct CvBox2D
+{
+ CvPoint2D32f center; /* Center of the box. */
+ CvSize2D32f size; /* Box width and length. */
+ float angle; /* Angle between the horizontal axis */
+ /* and the first side (i.e. length) in degrees */
+}
+CvBox2D;
+
+
+/* Line iterator state: */
+typedef struct CvLineIterator
+{
+ /* Pointer to the current point: */
+ uchar* ptr;
+
+ /* Bresenham algorithm state: */
+ int err;
+ int plus_delta;
+ int minus_delta;
+ int plus_step;
+ int minus_step;
+}
+CvLineIterator;
+
+
+
+/************************************* CvSlice ******************************************/
+
+typedef struct CvSlice
+{
+ int start_index, end_index;
+}
+CvSlice;
+
+CV_INLINE CvSlice cvSlice( int start, int end )
+{
+ CvSlice slice;
+ slice.start_index = start;
+ slice.end_index = end;
+
+ return slice;
+}
+
+#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff
+#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX)
+
+
+/************************************* CvScalar *****************************************/
+
+typedef struct CvScalar
+{
+ double val[4];
+}
+CvScalar;
+
+CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0),
+ double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0))
+{
+ CvScalar scalar;
+ scalar.val[0] = val0; scalar.val[1] = val1;
+ scalar.val[2] = val2; scalar.val[3] = val3;
+ return scalar;
+}
+
+
+CV_INLINE CvScalar cvRealScalar( double val0 )
+{
+ CvScalar scalar;
+ scalar.val[0] = val0;
+ scalar.val[1] = scalar.val[2] = scalar.val[3] = 0;
+ return scalar;
+}
+
+CV_INLINE CvScalar cvScalarAll( double val0123 )
+{
+ CvScalar scalar;
+ scalar.val[0] = val0123;
+ scalar.val[1] = val0123;
+ scalar.val[2] = val0123;
+ scalar.val[3] = val0123;
+ return scalar;
+}
+
+/****************************************************************************************\
+* Dynamic Data structures *
+\****************************************************************************************/
+
+/******************************** Memory storage ****************************************/
+
+typedef struct CvMemBlock
+{
+ struct CvMemBlock* prev;
+ struct CvMemBlock* next;
+}
+CvMemBlock;
+
+#define CV_STORAGE_MAGIC_VAL 0x42890000
+
+typedef struct CvMemStorage
+{
+ int signature;
+ CvMemBlock* bottom; /* First allocated block. */
+ CvMemBlock* top; /* Current memory block - top of the stack. */
+ struct CvMemStorage* parent; /* We get new blocks from parent as needed. */
+ int block_size; /* Block size. */
+ int free_space; /* Remaining free space in current block. */
+}
+CvMemStorage;
+
+#define CV_IS_STORAGE(storage) \
+ ((storage) != NULL && \
+ (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL)
+
+
+typedef struct CvMemStoragePos
+{
+ CvMemBlock* top;
+ int free_space;
+}
+CvMemStoragePos;
+
+
+/*********************************** Sequence *******************************************/
+
+typedef struct CvSeqBlock
+{
+ struct CvSeqBlock* prev; /* Previous sequence block. */
+ struct CvSeqBlock* next; /* Next sequence block. */
+ int start_index; /* Index of the first element in the block + */
+ /* sequence->first->start_index. */
+ int count; /* Number of elements in the block. */
+ schar* data; /* Pointer to the first element of the block. */
+}
+CvSeqBlock;
+
+
+#define CV_TREE_NODE_FIELDS(node_type) \
+ int flags; /* Miscellaneous flags. */ \
+ int header_size; /* Size of sequence header. */ \
+ struct node_type* h_prev; /* Previous sequence. */ \
+ struct node_type* h_next; /* Next sequence. */ \
+ struct node_type* v_prev; /* 2nd previous sequence. */ \
+ struct node_type* v_next /* 2nd next sequence. */
+
+/*
+ Read/Write sequence.
+ Elements can be dynamically inserted to or deleted from the sequence.
+*/
+#define CV_SEQUENCE_FIELDS() \
+ CV_TREE_NODE_FIELDS(CvSeq); \
+ int total; /* Total number of elements. */ \
+ int elem_size; /* Size of sequence element in bytes. */ \
+ schar* block_max; /* Maximal bound of the last block. */ \
+ schar* ptr; /* Current write pointer. */ \
+ int delta_elems; /* Grow seq this many at a time. */ \
+ CvMemStorage* storage; /* Where the seq is stored. */ \
+ CvSeqBlock* free_blocks; /* Free blocks list. */ \
+ CvSeqBlock* first; /* Pointer to the first sequence block. */
+
+typedef struct CvSeq
+{
+ CV_SEQUENCE_FIELDS()
+}
+CvSeq;
+
+#define CV_TYPE_NAME_SEQ "opencv-sequence"
+#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree"
+
+/*************************************** Set ********************************************/
+/*
+ Set.
+ Order is not preserved. There can be gaps between sequence elements.
+ After the element has been inserted it stays in the same place all the time.
+ The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists.
+*/
+#define CV_SET_ELEM_FIELDS(elem_type) \
+ int flags; \
+ struct elem_type* next_free;
+
+typedef struct CvSetElem
+{
+ CV_SET_ELEM_FIELDS(CvSetElem)
+}
+CvSetElem;
+
+#define CV_SET_FIELDS() \
+ CV_SEQUENCE_FIELDS() \
+ CvSetElem* free_elems; \
+ int active_count;
+
+typedef struct CvSet
+{
+ CV_SET_FIELDS()
+}
+CvSet;
+
+
+#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1)
+#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1))
+
+/* Checks whether the element pointed by ptr belongs to a set or not */
+#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0)
+
+/************************************* Graph ********************************************/
+
+/*
+ We represent a graph as a set of vertices.
+  Vertices contain their adjacency lists (more exactly, pointers to first incoming or
+  outgoing edge (or 0 if isolated vertex)). Edges are stored in another set.
+  There is a singly-linked list of incoming/outgoing edges for each vertex.
+
+ Each edge consists of
+
+ o Two pointers to the starting and ending vertices
+ (vtx[0] and vtx[1] respectively).
+
+ A graph may be oriented or not. In the latter case, edges between
+ vertex i to vertex j are not distinguished during search operations.
+
+ o Two pointers to next edges for the starting and ending vertices, where
+ next[0] points to the next edge in the vtx[0] adjacency list and
+ next[1] points to the next edge in the vtx[1] adjacency list.
+*/
+#define CV_GRAPH_EDGE_FIELDS() \
+ int flags; \
+ float weight; \
+ struct CvGraphEdge* next[2]; \
+ struct CvGraphVtx* vtx[2];
+
+
+#define CV_GRAPH_VERTEX_FIELDS() \
+ int flags; \
+ struct CvGraphEdge* first;
+
+
+typedef struct CvGraphEdge
+{
+ CV_GRAPH_EDGE_FIELDS()
+}
+CvGraphEdge;
+
+typedef struct CvGraphVtx
+{
+ CV_GRAPH_VERTEX_FIELDS()
+}
+CvGraphVtx;
+
+typedef struct CvGraphVtx2D
+{
+ CV_GRAPH_VERTEX_FIELDS()
+ CvPoint2D32f* ptr;
+}
+CvGraphVtx2D;
+
+/*
+  Graph is "derived" from the set (this is a set of vertices)
+ and includes another set (edges)
+*/
+#define CV_GRAPH_FIELDS() \
+ CV_SET_FIELDS() \
+ CvSet* edges;
+
+typedef struct CvGraph
+{
+ CV_GRAPH_FIELDS()
+}
+CvGraph;
+
+#define CV_TYPE_NAME_GRAPH "opencv-graph"
+
+/*********************************** Chain/Contour **************************************/
+
+typedef struct CvChain
+{
+ CV_SEQUENCE_FIELDS()
+ CvPoint origin;
+}
+CvChain;
+
+#define CV_CONTOUR_FIELDS() \
+ CV_SEQUENCE_FIELDS() \
+ CvRect rect; \
+ int color; \
+ int reserved[3];
+
+typedef struct CvContour
+{
+ CV_CONTOUR_FIELDS()
+}
+CvContour;
+
+typedef CvContour CvPoint2DSeq;
+
+/****************************************************************************************\
+* Sequence types *
+\****************************************************************************************/
+
+#define CV_SEQ_MAGIC_VAL 0x42990000
+
+#define CV_IS_SEQ(seq) \
+ ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL)
+
+#define CV_SET_MAGIC_VAL 0x42980000
+#define CV_IS_SET(set) \
+ ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL)
+
+#define CV_SEQ_ELTYPE_BITS 12
+#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1)
+
+#define CV_SEQ_ELTYPE_POINT CV_32SC2 /* (x,y) */
+#define CV_SEQ_ELTYPE_CODE CV_8UC1 /* freeman code: 0..7 */
+#define CV_SEQ_ELTYPE_GENERIC 0
+#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1
+#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /* &(x,y) */
+#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /* #(x,y) */
+#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /* &next_o, &next_d, &vtx_o, &vtx_d */
+#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /* first_edge, &(x,y) */
+#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /* vertex of the binary tree */
+#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /* connected component */
+#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /* (x,y,z) */
+
+#define CV_SEQ_KIND_BITS 2
+#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<<CV_SEQ_ELTYPE_BITS)
+
+/* types of sequences */
+#define CV_SEQ_KIND_GENERIC (0 << CV_SEQ_ELTYPE_BITS)
+#define CV_SEQ_KIND_CURVE (1 << CV_SEQ_ELTYPE_BITS)
+#define CV_SEQ_KIND_BIN_TREE (2 << CV_SEQ_ELTYPE_BITS)
+
+/* types of sparse sequences (sets) */
+#define CV_SEQ_KIND_GRAPH (1 << CV_SEQ_ELTYPE_BITS)
+#define CV_SEQ_KIND_SUBDIV2D (2 << CV_SEQ_ELTYPE_BITS)
+
+#define CV_SEQ_FLAG_SHIFT (CV_SEQ_KIND_BITS + CV_SEQ_ELTYPE_BITS)
+
+/* flags for curves */
+#define CV_SEQ_FLAG_CLOSED (1 << CV_SEQ_FLAG_SHIFT)
+#define CV_SEQ_FLAG_SIMPLE (0 << CV_SEQ_FLAG_SHIFT)
+#define CV_SEQ_FLAG_CONVEX (0 << CV_SEQ_FLAG_SHIFT)
+#define CV_SEQ_FLAG_HOLE (2 << CV_SEQ_FLAG_SHIFT)
+
+/* flags for graphs */
+#define CV_GRAPH_FLAG_ORIENTED (1 << CV_SEQ_FLAG_SHIFT)
+
+#define CV_GRAPH CV_SEQ_KIND_GRAPH
+#define CV_ORIENTED_GRAPH (CV_SEQ_KIND_GRAPH|CV_GRAPH_FLAG_ORIENTED)
+
+/* point sets */
+#define CV_SEQ_POINT_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT)
+#define CV_SEQ_POINT3D_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT3D)
+#define CV_SEQ_POLYLINE (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_POINT)
+#define CV_SEQ_POLYGON (CV_SEQ_FLAG_CLOSED | CV_SEQ_POLYLINE )
+#define CV_SEQ_CONTOUR CV_SEQ_POLYGON
+#define CV_SEQ_SIMPLE_POLYGON (CV_SEQ_FLAG_SIMPLE | CV_SEQ_POLYGON )
+
+/* chain-coded curves */
+#define CV_SEQ_CHAIN (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_CODE)
+#define CV_SEQ_CHAIN_CONTOUR (CV_SEQ_FLAG_CLOSED | CV_SEQ_CHAIN)
+
+/* binary tree for the contour */
+#define CV_SEQ_POLYGON_TREE (CV_SEQ_KIND_BIN_TREE | CV_SEQ_ELTYPE_TRIAN_ATR)
+
+/* sequence of the connected components */
+#define CV_SEQ_CONNECTED_COMP (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_CONNECTED_COMP)
+
+/* sequence of the integer numbers */
+#define CV_SEQ_INDEX (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_INDEX)
+
+#define CV_SEQ_ELTYPE( seq ) ((seq)->flags & CV_SEQ_ELTYPE_MASK)
+#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK )
+
+/* flag checking */
+#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \
+ (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC))
+
+#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE)
+#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0)
+#define CV_IS_SEQ_CONVEX( seq ) 0
+#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0)
+#define CV_IS_SEQ_SIMPLE( seq ) 1
+
+/* type checking macros */
+#define CV_IS_SEQ_POINT_SET( seq ) \
+ ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2))
+
+#define CV_IS_SEQ_POINT_SUBSET( seq ) \
+ (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT)
+
+#define CV_IS_SEQ_POLYLINE( seq ) \
+ (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq))
+
+#define CV_IS_SEQ_POLYGON( seq ) \
+ (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq))
+
+#define CV_IS_SEQ_CHAIN( seq ) \
+ (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1)
+
+#define CV_IS_SEQ_CONTOUR( seq ) \
+ (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq)))
+
+#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \
+ (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq ))
+
+#define CV_IS_SEQ_POLYGON_TREE( seq ) \
+ (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \
+ CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE )
+
+#define CV_IS_GRAPH( seq ) \
+ (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH)
+
+#define CV_IS_GRAPH_ORIENTED( seq ) \
+ (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0)
+
+#define CV_IS_SUBDIV2D( seq ) \
+ (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D)
+
+/****************************************************************************************/
+/* Sequence writer & reader */
+/****************************************************************************************/
+
+#define CV_SEQ_WRITER_FIELDS() \
+ int header_size; \
+ CvSeq* seq; /* the sequence written */ \
+ CvSeqBlock* block; /* current block */ \
+ schar* ptr; /* pointer to free space */ \
+ schar* block_min; /* pointer to the beginning of block*/\
+ schar* block_max; /* pointer to the end of block */
+
+typedef struct CvSeqWriter
+{
+ CV_SEQ_WRITER_FIELDS()
+}
+CvSeqWriter;
+
+
+#define CV_SEQ_READER_FIELDS() \
+ int header_size; \
+    CvSeq* seq;            /* sequence, being read */                        \
+ CvSeqBlock* block; /* current block */ \
+ schar* ptr; /* pointer to element be read next */ \
+ schar* block_min; /* pointer to the beginning of block */\
+ schar* block_max; /* pointer to the end of block */ \
+ int delta_index;/* = seq->first->start_index */ \
+ schar* prev_elem; /* pointer to previous element */
+
+
+typedef struct CvSeqReader
+{
+ CV_SEQ_READER_FIELDS()
+}
+CvSeqReader;
+
+/****************************************************************************************/
+/* Operations on sequences */
+/****************************************************************************************/
+
+#define CV_SEQ_ELEM( seq, elem_type, index ) \
+/* assert gives some guarantee that <seq> parameter is valid */ \
+( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \
+ (seq)->elem_size == sizeof(elem_type)), \
+ (elem_type*)((seq)->first && (unsigned)index < \
+ (unsigned)((seq)->first->count) ? \
+ (seq)->first->data + (index) * sizeof(elem_type) : \
+ cvGetSeqElem( (CvSeq*)(seq), (index) )))
+#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) )
+
+/* Add element to sequence: */
+#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \
+{ \
+ if( (writer).ptr >= (writer).block_max ) \
+ { \
+ cvCreateSeqBlock( &writer); \
+ } \
+ memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\
+ (writer).ptr += (writer).seq->elem_size; \
+}
+
+#define CV_WRITE_SEQ_ELEM( elem, writer ) \
+{ \
+ assert( (writer).seq->elem_size == sizeof(elem)); \
+ if( (writer).ptr >= (writer).block_max ) \
+ { \
+ cvCreateSeqBlock( &writer); \
+ } \
+ assert( (writer).ptr <= (writer).block_max - sizeof(elem));\
+ memcpy((writer).ptr, &(elem), sizeof(elem)); \
+ (writer).ptr += sizeof(elem); \
+}
+
+
+/* Move reader position forward: */
+#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \
+{ \
+ if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \
+ { \
+ cvChangeSeqBlock( &(reader), 1 ); \
+ } \
+}
+
+
+/* Move reader position backward: */
+#define CV_PREV_SEQ_ELEM( elem_size, reader ) \
+{ \
+ if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \
+ { \
+ cvChangeSeqBlock( &(reader), -1 ); \
+ } \
+}
+
+/* Read element and move read position forward: */
+#define CV_READ_SEQ_ELEM( elem, reader ) \
+{ \
+ assert( (reader).seq->elem_size == sizeof(elem)); \
+ memcpy( &(elem), (reader).ptr, sizeof((elem))); \
+ CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \
+}
+
+/* Read element and move read position backward: */
+#define CV_REV_READ_SEQ_ELEM( elem, reader ) \
+{ \
+ assert( (reader).seq->elem_size == sizeof(elem)); \
+ memcpy(&(elem), (reader).ptr, sizeof((elem))); \
+ CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \
+}
+
+
+#define CV_READ_CHAIN_POINT( _pt, reader ) \
+{ \
+ (_pt) = (reader).pt; \
+ if( (reader).ptr ) \
+ { \
+ CV_READ_SEQ_ELEM( (reader).code, (reader)); \
+ assert( ((reader).code & ~7) == 0 ); \
+ (reader).pt.x += (reader).deltas[(int)(reader).code][0]; \
+ (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \
+ } \
+}
+
+#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr)))
+#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem)))
+
+#define CV_READ_EDGE( pt1, pt2, reader ) \
+{ \
+ assert( sizeof(pt1) == sizeof(CvPoint) && \
+ sizeof(pt2) == sizeof(CvPoint) && \
+ reader.seq->elem_size == sizeof(CvPoint)); \
+ (pt1) = CV_PREV_POINT( reader ); \
+ (pt2) = CV_CURRENT_POINT( reader ); \
+ (reader).prev_elem = (reader).ptr; \
+ CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \
+}
+
+/************ Graph macros ************/
+
+/* Return next graph edge for given vertex: */
+#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \
+ (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \
+ (edge)->next[(edge)->vtx[1] == (vertex)])
+
+
+
+/****************************************************************************************\
+* Data structures for persistence (a.k.a serialization) functionality *
+\****************************************************************************************/
+
+/* "black box" file storage */
+typedef struct CvFileStorage CvFileStorage;
+
+/* Storage flags: */
+#define CV_STORAGE_READ 0
+#define CV_STORAGE_WRITE 1
+#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE
+#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE
+#define CV_STORAGE_APPEND 2
+#define CV_STORAGE_MEMORY 4
+#define CV_STORAGE_FORMAT_MASK (7<<3)
+#define CV_STORAGE_FORMAT_AUTO 0
+#define CV_STORAGE_FORMAT_XML 8
+#define CV_STORAGE_FORMAT_YAML 16
+
+/* List of attributes: */
+typedef struct CvAttrList
+{
+ const char** attr; /* NULL-terminated array of (attribute_name,attribute_value) pairs. */
+ struct CvAttrList* next; /* Pointer to next chunk of the attributes list. */
+}
+CvAttrList;
+
+CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL),
+ CvAttrList* next CV_DEFAULT(NULL) )
+{
+ CvAttrList l;
+ l.attr = attr;
+ l.next = next;
+
+ return l;
+}
+
+struct CvTypeInfo;
+
+#define CV_NODE_NONE 0
+#define CV_NODE_INT 1
+#define CV_NODE_INTEGER CV_NODE_INT
+#define CV_NODE_REAL 2
+#define CV_NODE_FLOAT CV_NODE_REAL
+#define CV_NODE_STR 3
+#define CV_NODE_STRING CV_NODE_STR
+#define CV_NODE_REF 4 /* not used */
+#define CV_NODE_SEQ 5
+#define CV_NODE_MAP 6
+#define CV_NODE_TYPE_MASK 7
+
+#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK)
+
+/* file node flags */
+#define CV_NODE_FLOW 8 /* Used only for writing structures in YAML format. */
+#define CV_NODE_USER 16
+#define CV_NODE_EMPTY 32
+#define CV_NODE_NAMED 64
+
+#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT)
+#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL)
+#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING)
+#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ)
+#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP)
+#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ)
+#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0)
+#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0)
+#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0)
+#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0)
+
+#define CV_NODE_SEQ_SIMPLE 256
+#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0)
+
+typedef struct CvString
+{
+ int len;
+ char* ptr;
+}
+CvString;
+
+/* All the keys (names) of elements in the file storage being read
+ are stored in the hash to speed up the lookup operations: */
+typedef struct CvStringHashNode
+{
+ unsigned hashval;
+ CvString str;
+ struct CvStringHashNode* next;
+}
+CvStringHashNode;
+
+typedef struct CvGenericHash CvFileNodeHash;
+
+/* Basic element of the file storage - scalar or collection: */
+typedef struct CvFileNode
+{
+ int tag;
+ struct CvTypeInfo* info; /* type information
+ (only for user-defined object, for others it is 0) */
+ union
+ {
+ double f; /* scalar floating-point number */
+ int i; /* scalar integer number */
+ CvString str; /* text string */
+ CvSeq* seq; /* sequence (ordered collection of file nodes) */
+ CvFileNodeHash* map; /* map (collection of named file nodes) */
+ } data;
+}
+CvFileNode;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr );
+typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr );
+typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node );
+typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name,
+ const void* struct_ptr, CvAttrList attributes );
+typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr );
+#ifdef __cplusplus
+}
+#endif
+
+typedef struct CvTypeInfo
+{
+ int flags;
+ int header_size;
+ struct CvTypeInfo* prev;
+ struct CvTypeInfo* next;
+ const char* type_name;
+ CvIsInstanceFunc is_instance;
+ CvReleaseFunc release;
+ CvReadFunc read;
+ CvWriteFunc write;
+ CvCloneFunc clone;
+}
+CvTypeInfo;
+
+
+/**** System data types ******/
+
+typedef struct CvPluginFuncInfo
+{
+ void** func_addr;
+ void* default_func_addr;
+ const char* func_names;
+ int search_modules;
+ int loaded_from;
+}
+CvPluginFuncInfo;
+
+typedef struct CvModuleInfo
+{
+ struct CvModuleInfo* next;
+ const char* name;
+ const char* version;
+ CvPluginFuncInfo* func_tab;
+}
+CvModuleInfo;
+
+#endif /*__OPENCV_CORE_TYPES_H__*/
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp
new file mode 100644
index 00000000..2dbb3c34
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/version.hpp
@@ -0,0 +1,72 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright( C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+//(including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort(including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+/*
+ definition of the current version of OpenCV
+  Useful for testing in user programs
+*/
+
+#ifndef __OPENCV_VERSION_HPP__
+#define __OPENCV_VERSION_HPP__
+
+#define CV_VERSION_EPOCH 2
+#define CV_VERSION_MAJOR 4
+#define CV_VERSION_MINOR 13
+#define CV_VERSION_REVISION 0
+
+#define CVAUX_STR_EXP(__A) #__A
+#define CVAUX_STR(__A) CVAUX_STR_EXP(__A)
+
+#define CVAUX_STRW_EXP(__A) L#__A
+#define CVAUX_STRW(__A) CVAUX_STRW_EXP(__A)
+
+#if CV_VERSION_REVISION
+# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR) "." CVAUX_STR(CV_VERSION_REVISION)
+#else
+# define CV_VERSION CVAUX_STR(CV_VERSION_EPOCH) "." CVAUX_STR(CV_VERSION_MAJOR) "." CVAUX_STR(CV_VERSION_MINOR)
+#endif
+
+/* old style version constants*/
+#define CV_MAJOR_VERSION CV_VERSION_EPOCH
+#define CV_MINOR_VERSION CV_VERSION_MAJOR
+#define CV_SUBMINOR_VERSION CV_VERSION_MINOR
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp
new file mode 100644
index 00000000..c7afa8c5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/core/wimage.hpp
@@ -0,0 +1,621 @@
+///////////////////////////////////////////////////////////////////////////////
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to
+// this license. If you do not agree to this license, do not download,
+// install, copy or use the software.
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2008, Google, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation or contributors may not be used to endorse
+// or promote products derived from this software without specific
+// prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is"
+// and any express or implied warranties, including, but not limited to, the
+// implied warranties of merchantability and fitness for a particular purpose
+// are disclaimed. In no event shall the Intel Corporation or contributors be
+// liable for any direct, indirect, incidental, special, exemplary, or
+// consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+
+
+/////////////////////////////////////////////////////////////////////////////////
+//
+// Image class which provides a thin layer around an IplImage. The goals
+// of the class design are:
+// 1. All the data has explicit ownership to avoid memory leaks
+// 2. No hidden allocations or copies for performance.
+// 3. Easy access to OpenCV methods (which will access IPP if available)
+// 4. Can easily treat external data as an image
+// 5. Easy to create images which are subsets of other images
+// 6. Fast pixel access which can take advantage of number of channels
+// if known at compile time.
+//
+// The WImage class is the image class which provides the data accessors.
+// The 'W' comes from the fact that it is also a wrapper around the popular
+// but inconvenient IplImage class. A WImage can be constructed either using a
+// WImageBuffer class which allocates and frees the data,
+// or using a WImageView class which constructs a subimage or a view into
+// external data. The view class does no memory management. Each class
+// actually has two versions, one when the number of channels is known at
+// compile time and one when it isn't. Using the one with the number of
+// channels specified can provide some compile time optimizations by using the
+// fact that the number of channels is a constant.
+//
+// We use the convention (c,r) to refer to column c and row r with (0,0) being
+// the upper left corner. This is similar to standard Euclidean coordinates
+// with the first coordinate varying in the horizontal direction and the second
+// coordinate varying in the vertical direction.
+// Thus (c,r) is usually in the domain [0, width) X [0, height)
+//
+// Example usage:
+// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar
+// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix
+// vector<float> vec(10, 3.0f);
+// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data
+//
+// im.SetZero(); // same as cvSetZero(im.Ipl())
+// *im(2, 3) = 15; // Modify the element at column 2, row 3
+// MySetRand(&sub_im);
+//
+// // Copy the second row into the first. This can be done with no memory
+// // allocation and will use SSE if IPP is available.
+// int w = im.Width();
+// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1));
+//
+// // Doesn't care about source of data since using WImage
+// void MySetRand(WImage_b* im) { // Works with any number of channels
+// for (int r = 0; r < im->Height(); ++r) {
+// float* row = im->Row(r);
+// for (int c = 0; c < im->Width(); ++c) {
+// for (int ch = 0; ch < im->Channels(); ++ch, ++row) {
+// *row = uchar(rand() & 255);
+// }
+// }
+// }
+// }
+//
+// Functions that are not part of the basic image allocation, viewing, and
+// access should come from OpenCV, except some useful functions that are not
+// part of OpenCV can be found in wimage_util.h
+#ifndef __OPENCV_CORE_WIMAGE_HPP__
+#define __OPENCV_CORE_WIMAGE_HPP__
+
+#include "opencv2/core/core_c.h"
+
+#ifdef __cplusplus
+
+namespace cv {
+
+template <typename T> class WImage;
+template <typename T> class WImageBuffer;
+template <typename T> class WImageView;
+
+template<typename T, int C> class WImageC;
+template<typename T, int C> class WImageBufferC;
+template<typename T, int C> class WImageViewC;
+
+// Commonly used typedefs.
+typedef WImage<uchar> WImage_b;
+typedef WImageView<uchar> WImageView_b;
+typedef WImageBuffer<uchar> WImageBuffer_b;
+
+typedef WImageC<uchar, 1> WImage1_b;
+typedef WImageViewC<uchar, 1> WImageView1_b;
+typedef WImageBufferC<uchar, 1> WImageBuffer1_b;
+
+typedef WImageC<uchar, 3> WImage3_b;
+typedef WImageViewC<uchar, 3> WImageView3_b;
+typedef WImageBufferC<uchar, 3> WImageBuffer3_b;
+
+typedef WImage<float> WImage_f;
+typedef WImageView<float> WImageView_f;
+typedef WImageBuffer<float> WImageBuffer_f;
+
+typedef WImageC<float, 1> WImage1_f;
+typedef WImageViewC<float, 1> WImageView1_f;
+typedef WImageBufferC<float, 1> WImageBuffer1_f;
+
+typedef WImageC<float, 3> WImage3_f;
+typedef WImageViewC<float, 3> WImageView3_f;
+typedef WImageBufferC<float, 3> WImageBuffer3_f;
+
+// There isn't a standard for signed and unsigned short so be more
+// explicit in the typename for these cases.
+typedef WImage<short> WImage_16s;
+typedef WImageView<short> WImageView_16s;
+typedef WImageBuffer<short> WImageBuffer_16s;
+
+typedef WImageC<short, 1> WImage1_16s;
+typedef WImageViewC<short, 1> WImageView1_16s;
+typedef WImageBufferC<short, 1> WImageBuffer1_16s;
+
+typedef WImageC<short, 3> WImage3_16s;
+typedef WImageViewC<short, 3> WImageView3_16s;
+typedef WImageBufferC<short, 3> WImageBuffer3_16s;
+
+typedef WImage<ushort> WImage_16u;
+typedef WImageView<ushort> WImageView_16u;
+typedef WImageBuffer<ushort> WImageBuffer_16u;
+
+typedef WImageC<ushort, 1> WImage1_16u;
+typedef WImageViewC<ushort, 1> WImageView1_16u;
+typedef WImageBufferC<ushort, 1> WImageBuffer1_16u;
+
+typedef WImageC<ushort, 3> WImage3_16u;
+typedef WImageViewC<ushort, 3> WImageView3_16u;
+typedef WImageBufferC<ushort, 3> WImageBuffer3_16u;
+
+//
+// WImage definitions
+//
+// This WImage class gives access to the data it refers to. It can be
+// constructed either by allocating the data with a WImageBuffer class or
+// using the WImageView class to refer to a subimage or outside data.
+template<typename T>
+class WImage
+{
+public:
+ typedef T BaseType;
+
+ // WImage is an abstract class with no other virtual methods so make the
+ // destructor virtual.
+ virtual ~WImage() = 0;
+
+ // Accessors
+ IplImage* Ipl() {return image_; }
+ const IplImage* Ipl() const {return image_; }
+ T* ImageData() { return reinterpret_cast<T*>(image_->imageData); }
+ const T* ImageData() const {
+ return reinterpret_cast<const T*>(image_->imageData);
+ }
+
+ int Width() const {return image_->width; }
+ int Height() const {return image_->height; }
+
+ // WidthStep is the number of bytes to go to the pixel with the next y coord
+ int WidthStep() const {return image_->widthStep; }
+
+ int Channels() const {return image_->nChannels; }
+ int ChannelSize() const {return sizeof(T); } // number of bytes per channel
+
+ // Number of bytes per pixel
+ int PixelSize() const {return Channels() * ChannelSize(); }
+
+ // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number
+ // of bits per channel and with the signed bit set.
+ // This is known at compile time using specializations.
+ int Depth() const;
+
+ inline const T* Row(int r) const {
+ return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep);
+ }
+
+ inline T* Row(int r) {
+ return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep);
+ }
+
+ // Pixel accessors which returns a pointer to the start of the channel
+ inline T* operator() (int c, int r) {
+ return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep) +
+ c*Channels();
+ }
+
+ inline const T* operator() (int c, int r) const {
+ return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep) +
+ c*Channels();
+ }
+
+ // Copy the contents from another image which is just a convenience to cvCopy
+ void CopyFrom(const WImage<T>& src) { cvCopy(src.Ipl(), image_); }
+
+    // Set contents to zero which is just a convenience to cvSetZero
+ void SetZero() { cvSetZero(image_); }
+
+ // Construct a view into a region of this image
+ WImageView<T> View(int c, int r, int width, int height);
+
+protected:
+ // Disallow copy and assignment
+ WImage(const WImage&);
+ void operator=(const WImage&);
+
+ explicit WImage(IplImage* img) : image_(img) {
+ assert(!img || img->depth == Depth());
+ }
+
+ void SetIpl(IplImage* image) {
+ assert(!image || image->depth == Depth());
+ image_ = image;
+ }
+
+ IplImage* image_;
+};
+
+
+
+// Image class when both the pixel type and number of channels
+// are known at compile time. This wrapper will speed up some of the operations
+// like accessing individual pixels using the () operator.
+template<typename T, int C>
+class WImageC : public WImage<T>
+{
+public:
+ typedef typename WImage<T>::BaseType BaseType;
+ enum { kChannels = C };
+
+ explicit WImageC(IplImage* img) : WImage<T>(img) {
+ assert(!img || img->nChannels == Channels());
+ }
+
+ // Construct a view into a region of this image
+ WImageViewC<T, C> View(int c, int r, int width, int height);
+
+ // Copy the contents from another image which is just a convenience to cvCopy
+ void CopyFrom(const WImageC<T, C>& src) {
+ cvCopy(src.Ipl(), WImage<T>::image_);
+ }
+
+ // WImageC is an abstract class with no other virtual methods so make the
+ // destructor virtual.
+ virtual ~WImageC() = 0;
+
+ int Channels() const {return C; }
+
+protected:
+ // Disallow copy and assignment
+ WImageC(const WImageC&);
+ void operator=(const WImageC&);
+
+ void SetIpl(IplImage* image) {
+ assert(!image || image->depth == WImage<T>::Depth());
+ WImage<T>::SetIpl(image);
+ }
+};
+
+//
+// WImageBuffer definitions
+//
+// Image class which owns the data, so it can be allocated and is always
+// freed. It cannot be copied but can be explicitly cloned.
+//
+template<typename T>
+class WImageBuffer : public WImage<T>
+{
+public:
+ typedef typename WImage<T>::BaseType BaseType;
+
+    // Default constructor which creates an object that can be allocated later.
+ WImageBuffer() : WImage<T>(0) {}
+
+ WImageBuffer(int width, int height, int nchannels) : WImage<T>(0) {
+ Allocate(width, height, nchannels);
+ }
+
+ // Constructor which takes ownership of a given IplImage so releases
+ // the image on destruction.
+ explicit WImageBuffer(IplImage* img) : WImage<T>(img) {}
+
+ // Allocate an image. Does nothing if current size is the same as
+ // the new size.
+ void Allocate(int width, int height, int nchannels);
+
+ // Set the data to point to an image, releasing the old data
+ void SetIpl(IplImage* img) {
+ ReleaseImage();
+ WImage<T>::SetIpl(img);
+ }
+
+ // Clone an image which reallocates the image if of a different dimension.
+ void CloneFrom(const WImage<T>& src) {
+ Allocate(src.Width(), src.Height(), src.Channels());
+ CopyFrom(src);
+ }
+
+ ~WImageBuffer() {
+ ReleaseImage();
+ }
+
+ // Release the image if it isn't null.
+ void ReleaseImage() {
+ if (WImage<T>::image_) {
+ IplImage* image = WImage<T>::image_;
+ cvReleaseImage(&image);
+ WImage<T>::SetIpl(0);
+ }
+ }
+
+ bool IsNull() const {return WImage<T>::image_ == NULL; }
+
+private:
+ // Disallow copy and assignment
+ WImageBuffer(const WImageBuffer&);
+ void operator=(const WImageBuffer&);
+};
+
+// Like a WImageBuffer class but when the number of channels is known
+// at compile time.
+template<typename T, int C>
+class WImageBufferC : public WImageC<T, C>
+{
+public:
+ typedef typename WImage<T>::BaseType BaseType;
+ enum { kChannels = C };
+
+    // Default constructor which creates an object that can be allocated later.
+ WImageBufferC() : WImageC<T, C>(0) {}
+
+ WImageBufferC(int width, int height) : WImageC<T, C>(0) {
+ Allocate(width, height);
+ }
+
+ // Constructor which takes ownership of a given IplImage so releases
+ // the image on destruction.
+ explicit WImageBufferC(IplImage* img) : WImageC<T, C>(img) {}
+
+ // Allocate an image. Does nothing if current size is the same as
+ // the new size.
+ void Allocate(int width, int height);
+
+ // Set the data to point to an image, releasing the old data
+ void SetIpl(IplImage* img) {
+ ReleaseImage();
+ WImageC<T, C>::SetIpl(img);
+ }
+
+ // Clone an image which reallocates the image if of a different dimension.
+ void CloneFrom(const WImageC<T, C>& src) {
+ Allocate(src.Width(), src.Height());
+ CopyFrom(src);
+ }
+
+ ~WImageBufferC() {
+ ReleaseImage();
+ }
+
+ // Release the image if it isn't null.
+ void ReleaseImage() {
+ if (WImage<T>::image_) {
+ IplImage* image = WImage<T>::image_;
+ cvReleaseImage(&image);
+ WImageC<T, C>::SetIpl(0);
+ }
+ }
+
+ bool IsNull() const {return WImage<T>::image_ == NULL; }
+
+private:
+ // Disallow copy and assignment
+ WImageBufferC(const WImageBufferC&);
+ void operator=(const WImageBufferC&);
+};
+
+//
+// WImageView definitions
+//
+// View into an image class which allows treating a subimage as an image
+// or treating external data as an image
+//
+template<typename T>
+class WImageView : public WImage<T>
+{
+public:
+ typedef typename WImage<T>::BaseType BaseType;
+
+ // Construct a subimage. No checks are done that the subimage lies
+ // completely inside the original image.
+ WImageView(WImage<T>* img, int c, int r, int width, int height);
+
+ // Refer to external data.
+ // If not given width_step assumed to be same as width.
+ WImageView(T* data, int width, int height, int channels, int width_step = -1);
+
+ // Refer to external data. This does NOT take ownership
+ // of the supplied IplImage.
+ WImageView(IplImage* img) : WImage<T>(img) {}
+
+ // Copy constructor
+ WImageView(const WImage<T>& img) : WImage<T>(0) {
+ header_ = *(img.Ipl());
+ WImage<T>::SetIpl(&header_);
+ }
+
+ WImageView& operator=(const WImage<T>& img) {
+ header_ = *(img.Ipl());
+ WImage<T>::SetIpl(&header_);
+ return *this;
+ }
+
+protected:
+ IplImage header_;
+};
+
+
+template<typename T, int C>
+class WImageViewC : public WImageC<T, C>
+{
+public:
+ typedef typename WImage<T>::BaseType BaseType;
+ enum { kChannels = C };
+
+ // Default constructor needed for vectors of views.
+ WImageViewC();
+
+ virtual ~WImageViewC() {}
+
+ // Construct a subimage. No checks are done that the subimage lies
+ // completely inside the original image.
+ WImageViewC(WImageC<T, C>* img,
+ int c, int r, int width, int height);
+
+ // Refer to external data
+ WImageViewC(T* data, int width, int height, int width_step = -1);
+
+ // Refer to external data. This does NOT take ownership
+ // of the supplied IplImage.
+ WImageViewC(IplImage* img) : WImageC<T, C>(img) {}
+
+ // Copy constructor which does a shallow copy to allow multiple views
+ // of same data. gcc-4.1.1 gets confused if both versions of
+ // the constructor and assignment operator are not provided.
+ WImageViewC(const WImageC<T, C>& img) : WImageC<T, C>(0) {
+ header_ = *(img.Ipl());
+ WImageC<T, C>::SetIpl(&header_);
+ }
+ WImageViewC(const WImageViewC<T, C>& img) : WImageC<T, C>(0) {
+ header_ = *(img.Ipl());
+ WImageC<T, C>::SetIpl(&header_);
+ }
+
+ WImageViewC& operator=(const WImageC<T, C>& img) {
+ header_ = *(img.Ipl());
+ WImageC<T, C>::SetIpl(&header_);
+ return *this;
+ }
+ WImageViewC& operator=(const WImageViewC<T, C>& img) {
+ header_ = *(img.Ipl());
+ WImageC<T, C>::SetIpl(&header_);
+ return *this;
+ }
+
+protected:
+ IplImage header_;
+};
+
+
+// Specializations for depth
+template<>
+inline int WImage<uchar>::Depth() const {return IPL_DEPTH_8U; }
+template<>
+inline int WImage<signed char>::Depth() const {return IPL_DEPTH_8S; }
+template<>
+inline int WImage<short>::Depth() const {return IPL_DEPTH_16S; }
+template<>
+inline int WImage<ushort>::Depth() const {return IPL_DEPTH_16U; }
+template<>
+inline int WImage<int>::Depth() const {return IPL_DEPTH_32S; }
+template<>
+inline int WImage<float>::Depth() const {return IPL_DEPTH_32F; }
+template<>
+inline int WImage<double>::Depth() const {return IPL_DEPTH_64F; }
+
+//
+// Pure virtual destructors still need to be defined.
+//
+template<typename T> inline WImage<T>::~WImage() {}
+template<typename T, int C> inline WImageC<T, C>::~WImageC() {}
+
+//
+// Allocate ImageData
+//
+template<typename T>
+inline void WImageBuffer<T>::Allocate(int width, int height, int nchannels)
+{
+ if (IsNull() || WImage<T>::Width() != width ||
+ WImage<T>::Height() != height || WImage<T>::Channels() != nchannels) {
+ ReleaseImage();
+ WImage<T>::image_ = cvCreateImage(cvSize(width, height),
+ WImage<T>::Depth(), nchannels);
+ }
+}
+
+template<typename T, int C>
+inline void WImageBufferC<T, C>::Allocate(int width, int height)
+{
+ if (IsNull() || WImage<T>::Width() != width || WImage<T>::Height() != height) {
+ ReleaseImage();
+ WImageC<T, C>::SetIpl(cvCreateImage(cvSize(width, height),WImage<T>::Depth(), C));
+ }
+}
+
+//
+// ImageView methods
+//
+template<typename T>
+WImageView<T>::WImageView(WImage<T>* img, int c, int r, int width, int height)
+ : WImage<T>(0)
+{
+ header_ = *(img->Ipl());
+ header_.imageData = reinterpret_cast<char*>((*img)(c, r));
+ header_.width = width;
+ header_.height = height;
+ WImage<T>::SetIpl(&header_);
+}
+
+template<typename T>
+WImageView<T>::WImageView(T* data, int width, int height, int nchannels, int width_step)
+ : WImage<T>(0)
+{
+ cvInitImageHeader(&header_, cvSize(width, height), WImage<T>::Depth(), nchannels);
+ header_.imageData = reinterpret_cast<char*>(data);
+ if (width_step > 0) {
+ header_.widthStep = width_step;
+ }
+ WImage<T>::SetIpl(&header_);
+}
+
+template<typename T, int C>
+WImageViewC<T, C>::WImageViewC(WImageC<T, C>* img, int c, int r, int width, int height)
+ : WImageC<T, C>(0)
+{
+ header_ = *(img->Ipl());
+ header_.imageData = reinterpret_cast<char*>((*img)(c, r));
+ header_.width = width;
+ header_.height = height;
+ WImageC<T, C>::SetIpl(&header_);
+}
+
+template<typename T, int C>
+WImageViewC<T, C>::WImageViewC() : WImageC<T, C>(0) {
+ cvInitImageHeader(&header_, cvSize(0, 0), WImage<T>::Depth(), C);
+ header_.imageData = reinterpret_cast<char*>(0);
+ WImageC<T, C>::SetIpl(&header_);
+}
+
+template<typename T, int C>
+WImageViewC<T, C>::WImageViewC(T* data, int width, int height, int width_step)
+ : WImageC<T, C>(0)
+{
+ cvInitImageHeader(&header_, cvSize(width, height), WImage<T>::Depth(), C);
+ header_.imageData = reinterpret_cast<char*>(data);
+ if (width_step > 0) {
+ header_.widthStep = width_step;
+ }
+ WImageC<T, C>::SetIpl(&header_);
+}
+
+// Construct a view into a region of an image
+template<typename T>
+WImageView<T> WImage<T>::View(int c, int r, int width, int height) {
+ return WImageView<T>(this, c, r, width, height);
+}
+
+template<typename T, int C>
+WImageViewC<T, C> WImageC<T, C>::View(int c, int r, int width, int height) {
+ return WImageViewC<T, C>(this, c, r, width, height);
+}
+
+} // end of namespace
+
+#endif // __cplusplus
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp
new file mode 100644
index 00000000..3ab3273f
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/features2d/features2d.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp
new file mode 100644
index 00000000..e4e796fb
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/features2d/features2d.hpp
@@ -0,0 +1,1616 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_FEATURES_2D_HPP__
+#define __OPENCV_FEATURES_2D_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/flann/miniflann.hpp"
+
+#ifdef __cplusplus
+#include <limits>
+
+namespace cv
+{
+
+CV_EXPORTS bool initModule_features2d();
+
+/*!
+ The Keypoint Class
+
+ The class instance stores a keypoint, i.e. a point feature found by one of many available keypoint detectors, such as
+ Harris corner detector, cv::FAST, cv::StarDetector, cv::SURF, cv::SIFT, cv::LDetector etc.
+
+ The keypoint is characterized by the 2D position, scale
+ (proportional to the diameter of the neighborhood that needs to be taken into account),
+ orientation and some other parameters. The keypoint neighborhood is then analyzed by another algorithm that builds a descriptor
+ (usually represented as a feature vector). The keypoints representing the same object in different images can then be matched using
+ cv::KDTree or another method.
+*/
+class CV_EXPORTS_W_SIMPLE KeyPoint
+{
+public:
+ //! the default constructor
+ CV_WRAP KeyPoint() : pt(0,0), size(0), angle(-1), response(0), octave(0), class_id(-1) {}
+ //! the full constructor
+ KeyPoint(Point2f _pt, float _size, float _angle=-1,
+ float _response=0, int _octave=0, int _class_id=-1)
+ : pt(_pt), size(_size), angle(_angle),
+ response(_response), octave(_octave), class_id(_class_id) {}
+ //! another form of the full constructor
+ CV_WRAP KeyPoint(float x, float y, float _size, float _angle=-1,
+ float _response=0, int _octave=0, int _class_id=-1)
+ : pt(x, y), size(_size), angle(_angle),
+ response(_response), octave(_octave), class_id(_class_id) {}
+
+ size_t hash() const;
+
+ //! converts vector of keypoints to vector of points
+ static void convert(const vector<KeyPoint>& keypoints,
+ CV_OUT vector<Point2f>& points2f,
+ const vector<int>& keypointIndexes=vector<int>());
+ //! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation
+ static void convert(const vector<Point2f>& points2f,
+ CV_OUT vector<KeyPoint>& keypoints,
+ float size=1, float response=1, int octave=0, int class_id=-1);
+
+ //! computes overlap for pair of keypoints;
+ //! overlap is a ratio between area of keypoint regions intersection and
+ //! area of keypoint regions union (now keypoint region is circle)
+ static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);
+
+ CV_PROP_RW Point2f pt; //!< coordinates of the keypoints
+ CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood
+ CV_PROP_RW float angle; //!< computed orientation of the keypoint (-1 if not applicable);
+ //!< it's in [0,360) degrees and measured relative to
+ //!< image coordinate system, ie in clockwise.
+ CV_PROP_RW float response; //!< the response by which the most strong keypoints have been selected. Can be used for the further sorting or subsampling
+ CV_PROP_RW int octave; //!< octave (pyramid layer) from which the keypoint has been extracted
+ CV_PROP_RW int class_id; //!< object class (if the keypoints need to be clustered by an object they belong to)
+};
+
+//! writes vector of keypoints to the file storage
+CV_EXPORTS void write(FileStorage& fs, const string& name, const vector<KeyPoint>& keypoints);
+//! reads vector of keypoints from the specified file storage node
+CV_EXPORTS void read(const FileNode& node, CV_OUT vector<KeyPoint>& keypoints);
+
+/*
+ * A class filters a vector of keypoints.
+ * Because now it is difficult to provide a convenient interface for all usage scenarios of the keypoints filter class,
+ * it has only several needed by now static methods.
+ */
+class CV_EXPORTS KeyPointsFilter
+{
+public:
+ KeyPointsFilter(){}
+
+ /*
+ * Remove keypoints within borderPixels of an image edge.
+ */
+ static void runByImageBorder( vector<KeyPoint>& keypoints, Size imageSize, int borderSize );
+ /*
+ * Remove keypoints of sizes out of range.
+ */
+ static void runByKeypointSize( vector<KeyPoint>& keypoints, float minSize,
+ float maxSize=FLT_MAX );
+ /*
+ * Remove keypoints from some image by mask for pixels of this image.
+ */
+ static void runByPixelsMask( vector<KeyPoint>& keypoints, const Mat& mask );
+ /*
+ * Remove duplicated keypoints.
+ */
+ static void removeDuplicated( vector<KeyPoint>& keypoints );
+
+ /*
+ * Retain the specified number of the best keypoints (according to the response)
+ */
+ static void retainBest( vector<KeyPoint>& keypoints, int npoints );
+};
+
+
+/************************************ Base Classes ************************************/
+
+/*
+ * Abstract base class for 2D image feature detectors.
+ */
+class CV_EXPORTS_W FeatureDetector : public virtual Algorithm
+{
+public:
+ virtual ~FeatureDetector();
+
+ /*
+ * Detect keypoints in an image.
+ * image The image.
+ * keypoints The detected keypoints.
+ * mask Mask specifying where to look for keypoints (optional). Must be a char
+ * matrix with non-zero values in the region of interest.
+ */
+ CV_WRAP void detect( const Mat& image, CV_OUT vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ /*
+ * Detect keypoints in an image set.
+ * images Image collection.
+ * keypoints Collection of keypoints detected in an input images. keypoints[i] is a set of keypoints detected in an images[i].
+ * masks Masks for image set. masks[i] is a mask for images[i].
+ */
+ void detect( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, const vector<Mat>& masks=vector<Mat>() ) const;
+
+ // Return true if detector object is empty
+ CV_WRAP virtual bool empty() const;
+
+ // Create feature detector by detector name.
+ CV_WRAP static Ptr<FeatureDetector> create( const string& detectorType );
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const = 0;
+
+ /*
+ * Remove keypoints that are not in the mask.
+ * Helper function, useful when wrapping a library call for keypoint detection that
+ * does not support a mask argument.
+ */
+ static void removeInvalidPoints( const Mat& mask, vector<KeyPoint>& keypoints );
+};
+
+
+/*
+ * Abstract base class for computing descriptors for image keypoints.
+ *
+ * In this interface we assume a keypoint descriptor can be represented as a
+ * dense, fixed-dimensional vector of some basic type. Most descriptors used
+ * in practice follow this pattern, as it makes it very easy to compute
+ * distances between descriptors. Therefore we represent a collection of
+ * descriptors as a Mat, where each row is one keypoint descriptor.
+ */
+class CV_EXPORTS_W DescriptorExtractor : public virtual Algorithm
+{
+public:
+ virtual ~DescriptorExtractor();
+
+ /*
+ * Compute the descriptors for a set of keypoints in an image.
+ * image The image.
+ * keypoints The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
+     * descriptors Computed descriptors. Row i is the descriptor for keypoint i.
+ */
+ CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT vector<KeyPoint>& keypoints, CV_OUT Mat& descriptors ) const;
+
+ /*
+ * Compute the descriptors for a keypoints collection detected in image collection.
+ * images Image collection.
+ * keypoints Input keypoints collection. keypoints[i] is keypoints detected in images[i].
+ * Keypoints for which a descriptor cannot be computed are removed.
+ * descriptors Descriptor collection. descriptors[i] are descriptors computed for set keypoints[i].
+ */
+ void compute( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints, vector<Mat>& descriptors ) const;
+
+ CV_WRAP virtual int descriptorSize() const = 0;
+ CV_WRAP virtual int descriptorType() const = 0;
+
+ CV_WRAP virtual bool empty() const;
+
+ CV_WRAP static Ptr<DescriptorExtractor> create( const string& descriptorExtractorType );
+
+protected:
+ virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const = 0;
+
+ /*
+ * Remove keypoints within borderPixels of an image edge.
+ */
+ static void removeBorderKeypoints( vector<KeyPoint>& keypoints,
+ Size imageSize, int borderSize );
+};
+
+
+
+/*
+ * Abstract base class for simultaneous 2D feature detection and descriptor extraction.
+ */
+class CV_EXPORTS_W Feature2D : public FeatureDetector, public DescriptorExtractor
+{
+public:
+ /*
+ * Detect keypoints in an image.
+ * image The image.
+ * keypoints The detected keypoints.
+ * mask Mask specifying where to look for keypoints (optional). Must be a char
+ * matrix with non-zero values in the region of interest.
+ * useProvidedKeypoints If true, the method will skip the detection phase and will compute
+ * descriptors for the provided keypoints
+ */
+ CV_WRAP_AS(detectAndCompute) virtual void operator()( InputArray image, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints,
+ OutputArray descriptors,
+ bool useProvidedKeypoints=false ) const = 0;
+
+ CV_WRAP void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints, CV_OUT Mat& descriptors ) const;
+
+ // Create feature detector and descriptor extractor by name.
+ CV_WRAP static Ptr<Feature2D> create( const string& name );
+};
+
+/*!
+ BRISK implementation
+*/
+class CV_EXPORTS_W BRISK : public Feature2D
+{
+public:
+ CV_WRAP explicit BRISK(int thresh=30, int octaves=3, float patternScale=1.0f);
+
+ virtual ~BRISK();
+
+ // returns the descriptor size in bytes
+ int descriptorSize() const;
+ // returns the descriptor type
+ int descriptorType() const;
+
+ // Compute the BRISK features on an image
+ void operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints) const;
+
+ // Compute the BRISK features and descriptors on an image
+ void operator()( InputArray image, InputArray mask, vector<KeyPoint>& keypoints,
+ OutputArray descriptors, bool useProvidedKeypoints=false ) const;
+
+ AlgorithmInfo* info() const;
+
+ // custom setup
+ CV_WRAP explicit BRISK(std::vector<float> &radiusList, std::vector<int> &numberList,
+ float dMax=5.85f, float dMin=8.2f, std::vector<int> indexChange=std::vector<int>());
+
+ // call this to generate the kernel:
+ // circle of radius r (pixels), with n points;
+ // short pairings with dMax, long pairings with dMin
+ CV_WRAP void generateKernel(std::vector<float> &radiusList,
+ std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
+ std::vector<int> indexChange=std::vector<int>());
+
+protected:
+
+ void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ void computeKeypointsNoOrientation(InputArray image, InputArray mask, vector<KeyPoint>& keypoints) const;
+ void computeDescriptorsAndOrOrientation(InputArray image, InputArray mask, vector<KeyPoint>& keypoints,
+ OutputArray descriptors, bool doDescriptors, bool doOrientation,
+ bool useProvidedKeypoints) const;
+
+ // Feature parameters
+ CV_PROP_RW int threshold;
+ CV_PROP_RW int octaves;
+
+ // some helper structures for the Brisk pattern representation
+ struct BriskPatternPoint{
+ float x; // x coordinate relative to center
+ float y; // y coordinate relative to center
+ float sigma; // Gaussian smoothing sigma
+ };
+ struct BriskShortPair{
+ unsigned int i; // index of the first pattern point
+ unsigned int j; // index of other pattern point
+ };
+ struct BriskLongPair{
+ unsigned int i; // index of the first pattern point
+ unsigned int j; // index of other pattern point
+ int weighted_dx; // 1024.0/dx
+ int weighted_dy; // 1024.0/dy
+ };
+ inline int smoothedIntensity(const cv::Mat& image,
+ const cv::Mat& integral,const float key_x,
+ const float key_y, const unsigned int scale,
+ const unsigned int rot, const unsigned int point) const;
+ // pattern properties
+ BriskPatternPoint* patternPoints_; //[i][rotation][scale]
+ unsigned int points_; // total number of collocation points
+ float* scaleList_; // lists the scaling per scale index [scale]
+ unsigned int* sizeList_; // lists the total pattern size per scale index [scale]
+ static const unsigned int scales_; // scales discretization
+ static const float scalerange_; // span of sizes 40->4 Octaves - else, this needs to be adjusted...
+ static const unsigned int n_rot_; // discretization of the rotation look-up
+
+ // pairs
+ int strings_; // number of uchars the descriptor consists of
+ float dMax_; // short pair maximum distance
+ float dMin_; // long pair maximum distance
+ BriskShortPair* shortPairs_; // d<_dMax
+ BriskLongPair* longPairs_; // d>_dMin
+ unsigned int noShortPairs_; // number of short pairs
+ unsigned int noLongPairs_; // number of long pairs
+
+ // general
+ static const float basicSize_;
+};
+
+
+/*!
+ ORB implementation.
+*/
+class CV_EXPORTS_W ORB : public Feature2D
+{
+public:
+ // the size of the signature in bytes
+ enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };
+
+ CV_WRAP explicit ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31,
+ int firstLevel = 0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31 );
+
+ // returns the descriptor size in bytes
+ int descriptorSize() const;
+ // returns the descriptor type
+ int descriptorType() const;
+
+ // Compute the ORB keypoints on an image (no descriptors)
+ void operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints) const;
+
+ // Compute the ORB features and descriptors on an image
+ void operator()( InputArray image, InputArray mask, vector<KeyPoint>& keypoints,
+ OutputArray descriptors, bool useProvidedKeypoints=false ) const;
+
+ AlgorithmInfo* info() const;
+
+protected:
+
+ void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ // parameters set by the constructor (same names and meanings)
+ CV_PROP_RW int nfeatures;
+ CV_PROP_RW double scaleFactor;
+ CV_PROP_RW int nlevels;
+ CV_PROP_RW int edgeThreshold;
+ CV_PROP_RW int firstLevel;
+ CV_PROP_RW int WTA_K;
+ CV_PROP_RW int scoreType;
+ CV_PROP_RW int patchSize;
+};
+
+typedef ORB OrbFeatureDetector;
+typedef ORB OrbDescriptorExtractor;
+
+/*!
+ FREAK implementation
+*/
+class CV_EXPORTS FREAK : public DescriptorExtractor
+{
+public:
+ /** Constructor
+ * @param orientationNormalized enable orientation normalization
+ * @param scaleNormalized enable scale normalization
+ * @param patternScale scaling of the description pattern
+ * @param nOctaves number of octaves covered by the detected keypoints
+ * @param selectedPairs (optional) user defined selected pairs
+ */
+ explicit FREAK( bool orientationNormalized = true,
+ bool scaleNormalized = true,
+ float patternScale = 22.0f,
+ int nOctaves = 4,
+ const vector<int>& selectedPairs = vector<int>());
+ FREAK( const FREAK& rhs );
+ FREAK& operator=( const FREAK& );
+
+ virtual ~FREAK();
+
+ /** returns the descriptor length in bytes */
+ virtual int descriptorSize() const;
+
+ /** returns the descriptor type */
+ virtual int descriptorType() const;
+
+ /** select the 512 "best description pairs"
+ * @param images grayscale images set
+ * @param keypoints set of detected keypoints
+ * @param corrThresh correlation threshold
+ * @param verbose print construction information
+ * @return list of best pair indexes
+ */
+ vector<int> selectPairs( const vector<Mat>& images, vector<vector<KeyPoint> >& keypoints,
+ const double corrThresh = 0.7, bool verbose = true );
+
+ AlgorithmInfo* info() const;
+
+ enum
+ {
+ NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45
+ };
+
+protected:
+ virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+ void buildPattern();
+ uchar meanIntensity( const Mat& image, const Mat& integral, const float kp_x, const float kp_y,
+ const unsigned int scale, const unsigned int rot, const unsigned int point ) const;
+
+ bool orientationNormalized; //true if the orientation is normalized, false otherwise
+ bool scaleNormalized; //true if the scale is normalized, false otherwise
+ double patternScale; //scaling of the pattern
+ int nOctaves; //number of octaves
+ bool extAll; // true if all pairs need to be extracted for pairs selection
+
+ double patternScale0;
+ int nOctaves0;
+ vector<int> selectedPairs0;
+
+ struct PatternPoint
+ {
+ float x; // x coordinate relative to center
+ float y; // y coordinate relative to center
+ float sigma; // Gaussian smoothing sigma
+ };
+
+ struct DescriptionPair
+ {
+ uchar i; // index of the first point
+ uchar j; // index of the second point
+ };
+
+ struct OrientationPair
+ {
+ uchar i; // index of the first point
+ uchar j; // index of the second point
+ int weight_dx; // dx/(norm_sq))*4096
+ int weight_dy; // dy/(norm_sq))*4096
+ };
+
+ vector<PatternPoint> patternLookup; // look-up table for the pattern points (position+sigma of all points at all scales and orientation)
+ int patternSizes[NB_SCALES]; // size of the pattern at a specific scale (used to check if a point is within image boundaries)
+ DescriptionPair descriptionPairs[NB_PAIRS];
+ OrientationPair orientationPairs[NB_ORIENPAIRS];
+};
+
+
+/*!
+ Maximal Stable Extremal Regions class.
+
+ The class implements MSER algorithm introduced by J. Matas.
+ Unlike SIFT, SURF and many other detectors in OpenCV, this is salient region detector,
+ not the salient point detector.
+
+ It returns the regions, each of those is encoded as a contour.
+*/
+class CV_EXPORTS_W MSER : public FeatureDetector
+{
+public:
+ //! the full constructor
+ CV_WRAP explicit MSER( int _delta=5, int _min_area=60, int _max_area=14400,
+ double _max_variation=0.25, double _min_diversity=.2,
+ int _max_evolution=200, double _area_threshold=1.01,
+ double _min_margin=0.003, int _edge_blur_size=5 );
+
+ //! the operator that extracts the MSERs from the image or the specific part of it
+ CV_WRAP_AS(detect) void operator()( const Mat& image, CV_OUT vector<vector<Point> >& msers,
+ const Mat& mask=Mat() ) const;
+ AlgorithmInfo* info() const;
+
+protected:
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ // algorithm parameters, mirroring the constructor arguments of the same name
+ int delta;
+ int minArea;
+ int maxArea;
+ double maxVariation;
+ double minDiversity;
+ int maxEvolution;
+ double areaThreshold;
+ double minMargin;
+ int edgeBlurSize;
+};
+
+typedef MSER MserFeatureDetector;
+
+/*!
+ The "Star" Detector.
+
+ The class implements the keypoint detector introduced by K. Konolige.
+*/
+class CV_EXPORTS_W StarDetector : public FeatureDetector
+{
+public:
+ //! the full constructor
+ CV_WRAP StarDetector(int _maxSize=45, int _responseThreshold=30,
+ int _lineThresholdProjected=10,
+ int _lineThresholdBinarized=8,
+ int _suppressNonmaxSize=5);
+
+ //! finds the keypoints in the image
+ CV_WRAP_AS(detect) void operator()(const Mat& image,
+ CV_OUT vector<KeyPoint>& keypoints) const;
+
+ AlgorithmInfo* info() const;
+
+protected:
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ // detector parameters, mirroring the constructor arguments of the same name
+ int maxSize;
+ int responseThreshold;
+ int lineThresholdProjected;
+ int lineThresholdBinarized;
+ int suppressNonmaxSize;
+};
+
+//! detects corners using FAST algorithm by E. Rosten
+CV_EXPORTS void FAST( InputArray image, CV_OUT vector<KeyPoint>& keypoints,
+ int threshold, bool nonmaxSuppression=true );
+
+//! detects corners using FAST algorithm, with a selectable pattern 'type' (see FastFeatureDetector::TYPE_*)
+CV_EXPORTS void FASTX( InputArray image, CV_OUT vector<KeyPoint>& keypoints,
+ int threshold, bool nonmaxSuppression, int type );
+
+//! wraps the FAST corner detector as a FeatureDetector
+class CV_EXPORTS_W FastFeatureDetector : public FeatureDetector
+{
+public:
+
+ enum
+ { // Define it in old class to simplify migration to 2.5
+ TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2
+ };
+
+ CV_WRAP FastFeatureDetector( int threshold=10, bool nonmaxSuppression=true );
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ // parameters set by the constructor
+ int threshold;
+ bool nonmaxSuppression;
+};
+
+
+//! "Good Features To Track" corner detector wrapped as a FeatureDetector
+class CV_EXPORTS_W GFTTDetector : public FeatureDetector
+{
+public:
+ CV_WRAP GFTTDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
+ int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ int nfeatures; // corresponds to the 'maxCorners' constructor argument
+ double qualityLevel;
+ double minDistance;
+ int blockSize;
+ bool useHarrisDetector;
+ double k;
+};
+
+typedef GFTTDetector GoodFeaturesToTrackDetector;
+typedef StarDetector StarFeatureDetector;
+
+class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector
+{
+public:
+ // Tunable blob-filtering parameters; each filterBy* flag enables the
+ // corresponding min/max range check below.
+ struct CV_EXPORTS_W_SIMPLE Params
+ {
+ CV_WRAP Params();
+ CV_PROP_RW float thresholdStep;
+ CV_PROP_RW float minThreshold;
+ CV_PROP_RW float maxThreshold;
+ CV_PROP_RW size_t minRepeatability;
+ CV_PROP_RW float minDistBetweenBlobs;
+
+ CV_PROP_RW bool filterByColor;
+ CV_PROP_RW uchar blobColor;
+
+ CV_PROP_RW bool filterByArea;
+ CV_PROP_RW float minArea, maxArea;
+
+ CV_PROP_RW bool filterByCircularity;
+ CV_PROP_RW float minCircularity, maxCircularity;
+
+ CV_PROP_RW bool filterByInertia;
+ CV_PROP_RW float minInertiaRatio, maxInertiaRatio;
+
+ CV_PROP_RW bool filterByConvexity;
+ CV_PROP_RW float minConvexity, maxConvexity;
+
+ void read( const FileNode& fn );
+ void write( FileStorage& fs ) const;
+ };
+
+ CV_WRAP SimpleBlobDetector(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
+
+ virtual void read( const FileNode& fn );
+ virtual void write( FileStorage& fs ) const;
+
+protected:
+ struct CV_EXPORTS Center
+ {
+ Point2d location;
+ double radius;
+ double confidence;
+ };
+
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+ virtual void findBlobs(const Mat &image, const Mat &binaryImage, vector<Center> &centers) const;
+
+ Params params;
+ AlgorithmInfo* info() const;
+};
+
+
+//! generates a dense grid of keypoints over the image, at one or more scales
+class CV_EXPORTS DenseFeatureDetector : public FeatureDetector
+{
+public:
+ explicit DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1,
+ float featureScaleMul=0.1f,
+ int initXyStep=6, int initImgBound=0,
+ bool varyXyStepWithScale=true,
+ bool varyImgBoundWithScale=false );
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ // parameters set by the constructor (same names and meanings)
+ double initFeatureScale;
+ int featureScaleLevels;
+ double featureScaleMul;
+
+ int initXyStep;
+ int initImgBound;
+
+ bool varyXyStepWithScale;
+ bool varyImgBoundWithScale;
+};
+
+/*
+ * Adapts a detector to partition the source image into a grid and detect
+ * points in each cell.
+ */
+class CV_EXPORTS_W GridAdaptedFeatureDetector : public FeatureDetector
+{
+public:
+ /*
+ * detector Detector that will be adapted.
+ * maxTotalKeypoints Maximum count of keypoints detected on the image. Only the strongest keypoints
+ * will be kept.
+ * gridRows Grid rows count.
+ * gridCols Grid column count.
+ */
+ CV_WRAP GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector=0,
+ int maxTotalKeypoints=1000,
+ int gridRows=4, int gridCols=4 );
+
+ // TODO implement read/write
+ virtual bool empty() const;
+
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ Ptr<FeatureDetector> detector;
+ int maxTotalKeypoints;
+ int gridRows;
+ int gridCols;
+};
+
+/*
+ * Adapts a detector to detect points over multiple levels of a Gaussian
+ * pyramid. Useful for detectors that are not inherently scaled.
+ */
+class CV_EXPORTS_W PyramidAdaptedFeatureDetector : public FeatureDetector
+{
+public:
+ // maxLevel - The 0-based index of the last pyramid layer
+ CV_WRAP PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector, int maxLevel=2 );
+
+ // TODO implement read/write
+ virtual bool empty() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ Ptr<FeatureDetector> detector; // the wrapped detector, run on each pyramid level
+ int maxLevel;
+};
+
+/** \brief A feature detector parameter adjuster, this is used by the DynamicAdaptedFeatureDetector
+ * and is a wrapper for FeatureDetector that allow them to be adjusted after a detection
+ */
+class CV_EXPORTS AdjusterAdapter: public FeatureDetector
+{
+public:
+ /** pure virtual interface
+ */
+ virtual ~AdjusterAdapter() {}
+ /** too few features were detected so, adjust the detector params accordingly
+ * \param min the minimum number of desired features
+ * \param n_detected the number previously detected
+ */
+ virtual void tooFew(int min, int n_detected) = 0;
+ /** too many features were detected so, adjust the detector params accordingly
+ * \param max the maximum number of desired features
+ * \param n_detected the number previously detected
+ */
+ virtual void tooMany(int max, int n_detected) = 0;
+ /** are params maxed out or still valid?
+ * \return false if the parameters can't be adjusted any more
+ */
+ virtual bool good() const = 0;
+
+ virtual Ptr<AdjusterAdapter> clone() const = 0;
+
+ static Ptr<AdjusterAdapter> create( const string& detectorType );
+};
+/** \brief an adaptively adjusting detector that iteratively detects until the desired number
+ * of features are detected.
+ * Beware that this is not thread safe - as the adjustment of parameters breaks the const
+ * of the detection routine...
+ * /TODO Make this const correct and thread safe
+ *
+ * sample usage:
+ //will create a detector that attempts to find 100 - 110 FAST Keypoints, and will at most run
+ //FAST feature detection 10 times until that number of keypoints are found
+ Ptr<FeatureDetector> detector(new DynamicAdaptedFeatureDetector(new FastAdjuster(20,true),100, 110, 10));
+
+ */
+class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector
+{
+public:
+
+ /** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment
+ * \param max_features the maximum desired number of features
+ * \param max_iters the maximum number of times to try to adjust the feature detector params
+ * for the FastAdjuster this can be high, but with Star or Surf this can get time consuming
+ * \param min_features the minimum desired features
+ */
+ DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 );
+
+ virtual bool empty() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+private:
+ // non-copyable: adjuster state would be shared between copies
+ DynamicAdaptedFeatureDetector& operator=(const DynamicAdaptedFeatureDetector&);
+ DynamicAdaptedFeatureDetector(const DynamicAdaptedFeatureDetector&);
+
+ int escape_iters_; // corresponds to the 'max_iters' constructor argument
+ int min_features_, max_features_;
+ const Ptr<AdjusterAdapter> adjuster_;
+};
+
+/**\brief an adjust for the FAST detector. This will basically decrement or increment the
+ * threshold by 1
+ */
+class CV_EXPORTS FastAdjuster: public AdjusterAdapter
+{
+public:
+ /**\param init_thresh the initial threshold to start with, default = 20
+ * \param nonmax whether to use non max or not for fast feature detection
+ * \param min_thresh lower bound for the adjusted threshold
+ * \param max_thresh upper bound for the adjusted threshold
+ */
+ FastAdjuster(int init_thresh=20, bool nonmax=true, int min_thresh=1, int max_thresh=200);
+
+ virtual void tooFew(int minv, int n_detected);
+ virtual void tooMany(int maxv, int n_detected);
+ virtual bool good() const;
+
+ virtual Ptr<AdjusterAdapter> clone() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ int thresh_;
+ bool nonmax_;
+ int init_thresh_, min_thresh_, max_thresh_;
+};
+
+
+/** An adjuster for StarFeatureDetector, this one adjusts the responseThreshold for now
+ * TODO find a faster way to converge the parameters for Star - use CvStarDetectorParams
+ */
+class CV_EXPORTS StarAdjuster: public AdjusterAdapter
+{
+public:
+ StarAdjuster(double initial_thresh=30.0, double min_thresh=2., double max_thresh=200.);
+
+ virtual void tooFew(int minv, int n_detected);
+ virtual void tooMany(int maxv, int n_detected);
+ virtual bool good() const;
+
+ virtual Ptr<AdjusterAdapter> clone() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ double thresh_, init_thresh_, min_thresh_, max_thresh_;
+};
+
+/** An adjuster for the SURF detector's hessian threshold */
+class CV_EXPORTS SurfAdjuster: public AdjusterAdapter
+{
+public:
+ SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 );
+
+ virtual void tooFew(int minv, int n_detected);
+ virtual void tooMany(int maxv, int n_detected);
+ virtual bool good() const;
+
+ virtual Ptr<AdjusterAdapter> clone() const;
+
+protected:
+ virtual void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+
+ double thresh_, init_thresh_, min_thresh_, max_thresh_;
+};
+
+// builds a match mask allowing only keypoint pairs whose coordinates differ
+// by at most (maxDeltaX, maxDeltaY)
+CV_EXPORTS Mat windowedMatchingMask( const vector<KeyPoint>& keypoints1, const vector<KeyPoint>& keypoints2,
+ float maxDeltaX, float maxDeltaY );
+
+
+
+/*
+ * OpponentColorDescriptorExtractor
+ *
+ * Adapts a descriptor extractor to compute descriptors in Opponent Color Space
+ * (refer to van de Sande et al., CGIV 2008 "Color Descriptors for Object Category Recognition").
+ * Input RGB image is transformed in Opponent Color Space. Then unadapted descriptor extractor
+ * (set in constructor) computes descriptors on each of the three channel and concatenate
+ * them into a single color descriptor.
+ */
+class CV_EXPORTS OpponentColorDescriptorExtractor : public DescriptorExtractor
+{
+public:
+ OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& descriptorExtractor );
+
+ virtual void read( const FileNode& );
+ virtual void write( FileStorage& ) const;
+
+ virtual int descriptorSize() const;
+ virtual int descriptorType() const;
+
+ virtual bool empty() const;
+
+protected:
+ virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+
+ Ptr<DescriptorExtractor> descriptorExtractor; // the wrapped (unadapted) extractor
+};
+
+/*
+ * BRIEF Descriptor
+ */
+class CV_EXPORTS BriefDescriptorExtractor : public DescriptorExtractor
+{
+public:
+ static const int PATCH_SIZE = 48;
+ static const int KERNEL_SIZE = 9;
+
+ // bytes is a length of descriptor in bytes. It can be equal 16, 32 or 64 bytes.
+ BriefDescriptorExtractor( int bytes = 32 );
+
+ virtual void read( const FileNode& );
+ virtual void write( FileStorage& ) const;
+
+ virtual int descriptorSize() const;
+ virtual int descriptorType() const;
+
+ /// @todo read and write for brief
+
+ AlgorithmInfo* info() const;
+
+protected:
+ virtual void computeImpl(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const;
+
+ // pixel-test function selected according to the descriptor length in bytes
+ typedef void(*PixelTestFn)(const Mat&, const vector<KeyPoint>&, Mat&);
+
+ int bytes_;
+ PixelTestFn test_fn_;
+};
+
+
+/****************************************************************************************\
+* Distance *
+\****************************************************************************************/
+
+// Accumulator<T>::Type is the type used to accumulate distance sums for
+// element type T; small integer types are promoted to float (see
+// specializations below), everything else accumulates in T itself.
+template<typename T>
+struct CV_EXPORTS Accumulator
+{
+ typedef T Type;
+};
+
+template<> struct Accumulator<unsigned char> { typedef float Type; };
+template<> struct Accumulator<unsigned short> { typedef float Type; };
+template<> struct Accumulator<char> { typedef float Type; };
+template<> struct Accumulator<short> { typedef float Type; };
+
+/*
+ * Squared Euclidean distance functor
+ */
+template<class T>
+struct CV_EXPORTS SL2
+{
+ enum { normType = NORM_L2SQR };
+ typedef T ValueType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ ResultType operator()( const T* a, const T* b, int size ) const
+ {
+ return normL2Sqr<ValueType, ResultType>(a, b, size);
+ }
+};
+
+/*
+ * Euclidean distance functor
+ */
+template<class T>
+struct CV_EXPORTS L2
+{
+ enum { normType = NORM_L2 };
+ typedef T ValueType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ ResultType operator()( const T* a, const T* b, int size ) const
+ {
+ return (ResultType)sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
+ }
+};
+
+/*
+ * Manhattan distance (city block distance) functor
+ */
+template<class T>
+struct CV_EXPORTS L1
+{
+ enum { normType = NORM_L1 };
+ typedef T ValueType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ ResultType operator()( const T* a, const T* b, int size ) const
+ {
+ return normL1<ValueType, ResultType>(a, b, size);
+ }
+};
+
+/*
+ * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
+ * bit count of A XOR'ed with B
+ */
+struct CV_EXPORTS Hamming
+{
+ enum { normType = NORM_HAMMING };
+ typedef unsigned char ValueType;
+ typedef int ResultType;
+
+ /** this will count the bits in a ^ b
+ */
+ ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const
+ {
+ return normHamming(a, b, size);
+ }
+};
+
+typedef Hamming HammingLUT;
+
+// Hamming distance computed over cells of 'cellsize' bits
+// (forwards to the normHamming overload taking a cell size)
+template<int cellsize> struct HammingMultilevel
+{
+ enum { normType = NORM_HAMMING + (cellsize>1) };
+ typedef unsigned char ValueType;
+ typedef int ResultType;
+
+ ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const
+ {
+ return normHamming(a, b, size, cellsize);
+ }
+};
+
+/****************************************************************************************\
+* DMatch *
+\****************************************************************************************/
+/*
+ * Struct for matching: query descriptor index, train descriptor index, train image index and distance between descriptors.
+ */
+struct CV_EXPORTS_W_SIMPLE DMatch
+{
+ CV_WRAP DMatch() : queryIdx(-1), trainIdx(-1), imgIdx(-1), distance(FLT_MAX) {}
+ CV_WRAP DMatch( int _queryIdx, int _trainIdx, float _distance ) :
+ queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}
+ CV_WRAP DMatch( int _queryIdx, int _trainIdx, int _imgIdx, float _distance ) :
+ queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}
+
+ CV_PROP_RW int queryIdx; // query descriptor index
+ CV_PROP_RW int trainIdx; // train descriptor index
+ CV_PROP_RW int imgIdx; // train image index
+
+ CV_PROP_RW float distance; // distance between the matched descriptors (smaller is better)
+
+ // less is better
+ bool operator<( const DMatch &m ) const
+ {
+ return distance < m.distance;
+ }
+};
+
+/****************************************************************************************\
+* DescriptorMatcher *
+\****************************************************************************************/
+/*
+ * Abstract base class for matching two sets of descriptors.
+ */
+class CV_EXPORTS_W DescriptorMatcher : public Algorithm
+{
+public:
+ virtual ~DescriptorMatcher();
+
+ /*
+ * Add descriptors to train descriptor collection.
+ * descriptors Descriptors to add. Each descriptors[i] is a descriptors set from one image.
+ */
+ CV_WRAP virtual void add( const vector<Mat>& descriptors );
+ /*
+ * Get train descriptors collection.
+ */
+ CV_WRAP const vector<Mat>& getTrainDescriptors() const;
+ /*
+ * Clear train descriptors collection.
+ */
+ CV_WRAP virtual void clear();
+
+ /*
+ * Return true if there are no train descriptors in collection.
+ */
+ CV_WRAP virtual bool empty() const;
+ /*
+ * Return true if the matcher supports mask in match methods.
+ */
+ CV_WRAP virtual bool isMaskSupported() const = 0;
+
+ /*
+ * Train matcher (e.g. train flann index).
+ * In all methods to match the method train() is run every time before matching.
+ * Some descriptor matchers (e.g. BruteForceMatcher) have empty implementation
+ * of this method, other matchers really train their inner structures
+ * (e.g. FlannBasedMatcher trains flann::Index). So nonempty implementation
+ * of train() should check the class object state and do training/retraining
+ * only if the state requires that (e.g. FlannBasedMatcher trains flann::Index
+ * if it has not trained yet or if new descriptors have been added to the train
+ * collection).
+ */
+ CV_WRAP virtual void train();
+ /*
+ * Group of methods to match descriptors from image pair.
+ * Method train() is run in this methods.
+ */
+ // Find one best match for each query descriptor (if mask is empty).
+ CV_WRAP void match( const Mat& queryDescriptors, const Mat& trainDescriptors,
+ CV_OUT vector<DMatch>& matches, const Mat& mask=Mat() ) const;
+ // Find k best matches for each query descriptor (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ CV_WRAP void knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors,
+ CV_OUT vector<vector<DMatch> >& matches, int k,
+ const Mat& mask=Mat(), bool compactResult=false ) const;
+ // Find best matches for each query descriptor which have distance less than
+ // maxDistance (in increasing order of distances).
+ void radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors,
+ vector<vector<DMatch> >& matches, float maxDistance,
+ const Mat& mask=Mat(), bool compactResult=false ) const;
+ /*
+ * Group of methods to match descriptors from one image to image set.
+ * See description of similar methods for matching image pair above.
+ */
+ CV_WRAP void match( const Mat& queryDescriptors, CV_OUT vector<DMatch>& matches,
+ const vector<Mat>& masks=vector<Mat>() );
+ CV_WRAP void knnMatch( const Mat& queryDescriptors, CV_OUT vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+ void radiusMatch( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+
+ // Reads matcher object from a file node
+ virtual void read( const FileNode& );
+ // Writes matcher object to a file storage
+ virtual void write( FileStorage& ) const;
+
+ // Clone the matcher. If emptyTrainData is false the method creates a deep copy of the object, i.e. copies
+ // both parameters and train data. If emptyTrainData is true the method creates an object copy with current parameters
+ // but with empty train data.
+ virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
+
+ CV_WRAP static Ptr<DescriptorMatcher> create( const string& descriptorMatcherType );
+protected:
+ /*
+ * Class to work with descriptors from several images as with one merged matrix.
+ * It is used e.g. in FlannBasedMatcher.
+ */
+ class CV_EXPORTS DescriptorCollection
+ {
+ public:
+ DescriptorCollection();
+ DescriptorCollection( const DescriptorCollection& collection );
+ virtual ~DescriptorCollection();
+
+ // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here.
+ void set( const vector<Mat>& descriptors );
+ virtual void clear();
+
+ const Mat& getDescriptors() const;
+ const Mat getDescriptor( int imgIdx, int localDescIdx ) const;
+ const Mat getDescriptor( int globalDescIdx ) const;
+ void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const;
+
+ int size() const;
+
+ protected:
+ Mat mergedDescriptors;
+ vector<int> startIdxs;
+ };
+
+ // In fact the matching is implemented only by the following two methods. These methods suppose
+ // that the class object has been trained already. Public match methods call these methods
+ // after calling train().
+ virtual void knnMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false ) = 0;
+ virtual void radiusMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false ) = 0;
+
+ static bool isPossibleMatch( const Mat& mask, int queryIdx, int trainIdx );
+ static bool isMaskedOut( const vector<Mat>& masks, int queryIdx );
+
+ static Mat clone_op( Mat m ) { return m.clone(); }
+ void checkMasks( const vector<Mat>& masks, int queryDescriptorsCount ) const;
+
+ // Collection of descriptors from train images.
+ vector<Mat> trainDescCollection;
+};
+
+/*
+ * Brute-force descriptor matcher.
+ *
+ * For each descriptor in the first set, this matcher finds the closest
+ * descriptor in the second set by trying each one.
+ *
+ * For efficiency, BruteForceMatcher is templated on the distance metric.
+ * For float descriptors, a common choice would be cv::L2<float>.
+ */
+class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
+{
+public:
+ CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );
+ virtual ~BFMatcher() {}
+
+ virtual bool isMaskSupported() const { return true; }
+
+ virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
+
+ AlgorithmInfo* info() const;
+protected:
+ virtual void knnMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+ virtual void radiusMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+
+ int normType;
+ bool crossCheck;
+};
+
+
+/*
+ * Flann based matcher
+ */
+class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
+{
+public:
+ CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=new flann::KDTreeIndexParams(),
+ const Ptr<flann::SearchParams>& searchParams=new flann::SearchParams() );
+
+ virtual void add( const vector<Mat>& descriptors );
+ virtual void clear();
+
+ // Reads matcher object from a file node
+ virtual void read( const FileNode& );
+ // Writes matcher object to a file storage
+ virtual void write( FileStorage& ) const;
+
+ virtual void train();
+ virtual bool isMaskSupported() const;
+
+ virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
+
+ AlgorithmInfo* info() const;
+protected:
+ static void convertToDMatches( const DescriptorCollection& descriptors,
+ const Mat& indices, const Mat& distances,
+ vector<vector<DMatch> >& matches );
+
+ virtual void knnMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+ virtual void radiusMatchImpl( const Mat& queryDescriptors, vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+
+ Ptr<flann::IndexParams> indexParams;
+ Ptr<flann::SearchParams> searchParams;
+ Ptr<flann::Index> flannIndex;
+
+ DescriptorCollection mergedDescriptors;
+ int addedDescCount;
+};
+
+/****************************************************************************************\
+* GenericDescriptorMatcher *
+\****************************************************************************************/
+/*
+ * Abstract interface for a keypoint descriptor and matcher
+ */
+class GenericDescriptorMatcher;
+typedef GenericDescriptorMatcher GenericDescriptorMatch;
+
+class CV_EXPORTS GenericDescriptorMatcher
+{
+public:
+ GenericDescriptorMatcher();
+ virtual ~GenericDescriptorMatcher();
+
+ /*
+ * Add train collection: images and keypoints from them.
+ * images A set of train images.
+ * ketpoints Keypoint collection that have been detected on train images.
+ *
+ * Keypoints for which a descriptor cannot be computed are removed. Such keypoints
+ * must be filtered in this method befor adding keypoints to train collection "trainPointCollection".
+ * If inheritor class need perform such prefiltering the method add() must be overloaded.
+ * In the other class methods programmer has access to the train keypoints by a constant link.
+ */
+ virtual void add( const vector<Mat>& images,
+ vector<vector<KeyPoint> >& keypoints );
+
+ const vector<Mat>& getTrainImages() const;
+ const vector<vector<KeyPoint> >& getTrainKeypoints() const;
+
+ /*
+ * Clear images and keypoints storing in train collection.
+ */
+ virtual void clear();
+ /*
+ * Returns true if matcher supports mask to match descriptors.
+ */
+ virtual bool isMaskSupported() = 0;
+ /*
+ * Train some inner structures (e.g. flann index or decision trees).
+ * train() methods is run every time in matching methods. So the method implementation
+ * should has a check whether these inner structures need be trained/retrained or not.
+ */
+ virtual void train();
+
+ /*
+ * Classifies query keypoints.
+ * queryImage The query image
+ * queryKeypoints Keypoints from the query image
+ * trainImage The train image
+ * trainKeypoints Keypoints from the train image
+ */
+ // Classify keypoints from query image under one train image.
+ void classify( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ const Mat& trainImage, vector<KeyPoint>& trainKeypoints ) const;
+ // Classify keypoints from query image under train image collection.
+ void classify( const Mat& queryImage, vector<KeyPoint>& queryKeypoints );
+
+ /*
+ * Group of methods to match keypoints from image pair.
+ * Keypoints for which a descriptor cannot be computed are removed.
+ * train() method is called here.
+ */
+ // Find one best match for each query descriptor (if mask is empty).
+ void match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
+ vector<DMatch>& matches, const Mat& mask=Mat() ) const;
+ // Find k best matches for each query keypoint (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows.
+ // If compactResult is true matches vector will not contain matches for fully masked out query descriptors.
+ void knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
+ vector<vector<DMatch> >& matches, int k,
+ const Mat& mask=Mat(), bool compactResult=false ) const;
+ // Find best matches for each query descriptor which have distance less than maxDistance (in increasing order of distances).
+ void radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ const Mat& trainImage, vector<KeyPoint>& trainKeypoints,
+ vector<vector<DMatch> >& matches, float maxDistance,
+ const Mat& mask=Mat(), bool compactResult=false ) const;
+ /*
+ * Group of methods to match keypoints from one image to image set.
+ * See description of similar methods for matching image pair above.
+ */
+ void match( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() );
+ void knnMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+ void radiusMatch( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks=vector<Mat>(), bool compactResult=false );
+
+ // Reads matcher object from a file node
+ virtual void read( const FileNode& fn );
+ // Writes matcher object to a file storage
+ virtual void write( FileStorage& fs ) const;
+
+ // Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty)
+ virtual bool empty() const;
+
+ // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies
+ // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters
+ // but with empty train data.
+ virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
+
+ static Ptr<GenericDescriptorMatcher> create( const string& genericDescritptorMatcherType,
+ const string &paramsFilename=string() );
+
+protected:
+ // In fact the matching is implemented only by the following two methods. These methods suppose
+ // that the class object has been trained already. Public match methods call these methods
+ // after calling train().
+ virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks, bool compactResult ) = 0;
+ virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks, bool compactResult ) = 0;
+ /*
+ * A storage for sets of keypoints together with corresponding images and class IDs
+ */
+ class CV_EXPORTS KeyPointCollection
+ {
+ public:
+ KeyPointCollection();
+ KeyPointCollection( const KeyPointCollection& collection );
+ void add( const vector<Mat>& images, const vector<vector<KeyPoint> >& keypoints );
+ void clear();
+
+ // Returns the total number of keypoints in the collection
+ size_t keypointCount() const;
+ size_t imageCount() const;
+
+ const vector<vector<KeyPoint> >& getKeypoints() const;
+ const vector<KeyPoint>& getKeypoints( int imgIdx ) const;
+ const KeyPoint& getKeyPoint( int imgIdx, int localPointIdx ) const;
+ const KeyPoint& getKeyPoint( int globalPointIdx ) const;
+ void getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const;
+
+ const vector<Mat>& getImages() const;
+ const Mat& getImage( int imgIdx ) const;
+
+ protected:
+ int pointCount;
+
+ vector<Mat> images;
+ vector<vector<KeyPoint> > keypoints;
+ // global indices of the first points in each image, startIndices.size() = keypoints.size()
+ vector<int> startIndices;
+
+ private:
+ static Mat clone_op( Mat m ) { return m.clone(); }
+ };
+
+ KeyPointCollection trainPointCollection;
+};
+
+
+/****************************************************************************************\
+* VectorDescriptorMatcher *
+\****************************************************************************************/
+
+/*
+ * A class used for matching descriptors that can be described as vectors in a finite-dimensional space
+ */
+class VectorDescriptorMatcher;
+typedef VectorDescriptorMatcher VectorDescriptorMatch;
+
+class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher
+{
+public:
+ VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
+ virtual ~VectorDescriptorMatcher();
+
+ virtual void add( const vector<Mat>& imgCollection,
+ vector<vector<KeyPoint> >& pointCollection );
+
+ virtual void clear();
+
+ virtual void train();
+
+ virtual bool isMaskSupported();
+
+ virtual void read( const FileNode& fn );
+ virtual void write( FileStorage& fs ) const;
+ virtual bool empty() const;
+
+ virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
+
+protected:
+ virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks, bool compactResult );
+ virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks, bool compactResult );
+
+ Ptr<DescriptorExtractor> extractor;
+ Ptr<DescriptorMatcher> matcher;
+};
+
+/****************************************************************************************\
+* Drawing functions *
+\****************************************************************************************/
+struct CV_EXPORTS DrawMatchesFlags
+{
+ enum{ DEFAULT = 0, // Output image matrix will be created (Mat::create),
+ // i.e. existing memory of output image may be reused.
+ // Two source image, matches and single keypoints will be drawn.
+ // For each keypoint only the center point will be drawn (without
+ // the circle around keypoint with keypoint size and orientation).
+ DRAW_OVER_OUTIMG = 1, // Output image matrix will not be created (Mat::create).
+ // Matches will be drawn on existing content of output image.
+ NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.
+ DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around keypoint with keypoint size and
+ // orientation will be drawn.
+ };
+};
+
+// Draw keypoints.
+CV_EXPORTS_W void drawKeypoints( const Mat& image, const vector<KeyPoint>& keypoints, CV_OUT Mat& outImage,
+ const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );
+
+// Draws matches of keypints from two images on output image.
+CV_EXPORTS void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1,
+ const Mat& img2, const vector<KeyPoint>& keypoints2,
+ const vector<DMatch>& matches1to2, Mat& outImg,
+ const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
+ const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT );
+
+CV_EXPORTS void drawMatches( const Mat& img1, const vector<KeyPoint>& keypoints1,
+ const Mat& img2, const vector<KeyPoint>& keypoints2,
+ const vector<vector<DMatch> >& matches1to2, Mat& outImg,
+ const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
+ const vector<vector<char> >& matchesMask=vector<vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );
+
+/****************************************************************************************\
+* Functions to evaluate the feature detectors and [generic] descriptor extractors *
+\****************************************************************************************/
+
+CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
+ vector<KeyPoint>* keypoints1, vector<KeyPoint>* keypoints2,
+ float& repeatability, int& correspCount,
+ const Ptr<FeatureDetector>& fdetector=Ptr<FeatureDetector>() );
+
+CV_EXPORTS void computeRecallPrecisionCurve( const vector<vector<DMatch> >& matches1to2,
+ const vector<vector<uchar> >& correctMatches1to2Mask,
+ vector<Point2f>& recallPrecisionCurve );
+
+CV_EXPORTS float getRecall( const vector<Point2f>& recallPrecisionCurve, float l_precision );
+CV_EXPORTS int getNearestPoint( const vector<Point2f>& recallPrecisionCurve, float l_precision );
+
+CV_EXPORTS void evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2,
+ vector<KeyPoint>& keypoints1, vector<KeyPoint>& keypoints2,
+ vector<vector<DMatch> >* matches1to2, vector<vector<uchar> >* correctMatches1to2Mask,
+ vector<Point2f>& recallPrecisionCurve,
+ const Ptr<GenericDescriptorMatcher>& dmatch=Ptr<GenericDescriptorMatcher>() );
+
+
+/****************************************************************************************\
+* Bag of visual words *
+\****************************************************************************************/
+/*
+ * Abstract base class for training of a 'bag of visual words' vocabulary from a set of descriptors
+ */
+class CV_EXPORTS_W BOWTrainer
+{
+public:
+ BOWTrainer();
+ virtual ~BOWTrainer();
+
+ CV_WRAP void add( const Mat& descriptors );
+ CV_WRAP const vector<Mat>& getDescriptors() const;
+ CV_WRAP int descripotorsCount() const;
+
+ CV_WRAP virtual void clear();
+
+ /*
+ * Train visual words vocabulary, that is cluster training descriptors and
+ * compute cluster centers.
+ * Returns cluster centers.
+ *
+ * descriptors Training descriptors computed on images keypoints.
+ */
+ CV_WRAP virtual Mat cluster() const = 0;
+ CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;
+
+protected:
+ vector<Mat> descriptors;
+ int size;
+};
+
+/*
+ * This is BOWTrainer using cv::kmeans to get vocabulary.
+ */
+class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer
+{
+public:
+ CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
+ int attempts=3, int flags=KMEANS_PP_CENTERS );
+ virtual ~BOWKMeansTrainer();
+
+ // Returns trained vocabulary (i.e. cluster centers).
+ CV_WRAP virtual Mat cluster() const;
+ CV_WRAP virtual Mat cluster( const Mat& descriptors ) const;
+
+protected:
+
+ int clusterCount;
+ TermCriteria termcrit;
+ int attempts;
+ int flags;
+};
+
+/*
+ * Class to compute image descriptor using bag of visual words.
+ */
+class CV_EXPORTS_W BOWImgDescriptorExtractor
+{
+public:
+ CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,
+ const Ptr<DescriptorMatcher>& dmatcher );
+ virtual ~BOWImgDescriptorExtractor();
+
+ CV_WRAP void setVocabulary( const Mat& vocabulary );
+ CV_WRAP const Mat& getVocabulary() const;
+ void compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& imgDescriptor,
+ vector<vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
+ // compute() is not constant because DescriptorMatcher::match is not constant
+
+ CV_WRAP_AS(compute) void compute2( const Mat& image, vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )
+ { compute(image,keypoints,imgDescriptor); }
+
+ CV_WRAP int descriptorSize() const;
+ CV_WRAP int descriptorType() const;
+
+protected:
+ Mat vocabulary;
+ Ptr<DescriptorExtractor> dextractor;
+ Ptr<DescriptorMatcher> dmatcher;
+};
+
+} /* namespace cv */
+
+#endif /* __cplusplus */
+
+#endif
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp
new file mode 100644
index 00000000..ea8fcd73
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/flann/flann.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h
new file mode 100644
index 00000000..ff53fd84
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/all_indices.h
@@ -0,0 +1,155 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_ALL_INDICES_H_
+#define OPENCV_FLANN_ALL_INDICES_H_
+
+#include "general.h"
+
+#include "nn_index.h"
+#include "kdtree_index.h"
+#include "kdtree_single_index.h"
+#include "kmeans_index.h"
+#include "composite_index.h"
+#include "linear_index.h"
+#include "hierarchical_clustering_index.h"
+#include "lsh_index.h"
+#include "autotuned_index.h"
+
+
+namespace cvflann
+{
+
+template<typename KDTreeCapability, typename VectorSpace, typename Distance>
+struct index_creator
+{
+ static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+ {
+ flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, "algorithm");
+
+ NNIndex<Distance>* nnIndex;
+ switch (index_type) {
+ case FLANN_INDEX_LINEAR:
+ nnIndex = new LinearIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_KDTREE_SINGLE:
+ nnIndex = new KDTreeSingleIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_KDTREE:
+ nnIndex = new KDTreeIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_KMEANS:
+ nnIndex = new KMeansIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_COMPOSITE:
+ nnIndex = new CompositeIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_AUTOTUNED:
+ nnIndex = new AutotunedIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_HIERARCHICAL:
+ nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_LSH:
+ nnIndex = new LshIndex<Distance>(dataset, params, distance);
+ break;
+ default:
+ throw FLANNException("Unknown index type");
+ }
+
+ return nnIndex;
+ }
+};
+
+template<typename VectorSpace, typename Distance>
+struct index_creator<False,VectorSpace,Distance>
+{
+ static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+ {
+ flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, "algorithm");
+
+ NNIndex<Distance>* nnIndex;
+ switch (index_type) {
+ case FLANN_INDEX_LINEAR:
+ nnIndex = new LinearIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_KMEANS:
+ nnIndex = new KMeansIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_HIERARCHICAL:
+ nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_LSH:
+ nnIndex = new LshIndex<Distance>(dataset, params, distance);
+ break;
+ default:
+ throw FLANNException("Unknown index type");
+ }
+
+ return nnIndex;
+ }
+};
+
+template<typename Distance>
+struct index_creator<False,False,Distance>
+{
+ static NNIndex<Distance>* create(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+ {
+ flann_algorithm_t index_type = get_param<flann_algorithm_t>(params, "algorithm");
+
+ NNIndex<Distance>* nnIndex;
+ switch (index_type) {
+ case FLANN_INDEX_LINEAR:
+ nnIndex = new LinearIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_HIERARCHICAL:
+ nnIndex = new HierarchicalClusteringIndex<Distance>(dataset, params, distance);
+ break;
+ case FLANN_INDEX_LSH:
+ nnIndex = new LshIndex<Distance>(dataset, params, distance);
+ break;
+ default:
+ throw FLANNException("Unknown index type");
+ }
+
+ return nnIndex;
+ }
+};
+
+template<typename Distance>
+NNIndex<Distance>* create_index_by_type(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance)
+{
+ return index_creator<typename Distance::is_kdtree_distance,
+ typename Distance::is_vector_space_distance,
+ Distance>::create(dataset, params,distance);
+}
+
+}
+
+#endif /* OPENCV_FLANN_ALL_INDICES_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h
new file mode 100644
index 00000000..26091d0c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/allocator.h
@@ -0,0 +1,188 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_ALLOCATOR_H_
+#define OPENCV_FLANN_ALLOCATOR_H_
+
+#include <stdlib.h>
+#include <stdio.h>
+
+
+namespace cvflann
+{
+
+/**
+ * Allocates (using C's malloc) a generic type T.
+ *
+ * Params:
+ * count = number of instances to allocate.
+ * Returns: pointer (of type T*) to memory buffer
+ */
+template <typename T>
+T* allocate(size_t count = 1)
+{
+ T* mem = (T*) ::malloc(sizeof(T)*count);
+ return mem;
+}
+
+
+/**
+ * Pooled storage allocator
+ *
+ * The following routines allow for the efficient allocation of storage in
+ * small chunks from a specified pool. Rather than allowing each structure
+ * to be freed individually, an entire pool of storage is freed at once.
+ * This method has two advantages over just using malloc() and free(). First,
+ * it is far more efficient for allocating small objects, as there is
+ * no overhead for remembering all the information needed to free each
+ * object or consolidating fragmented memory. Second, the decision about
+ * how long to keep an object is made at the time of allocation, and there
+ * is no need to track down all the objects to free them.
+ *
+ */
+
+const size_t WORDSIZE=16;
+const size_t BLOCKSIZE=8192;
+
+class PooledAllocator
+{
+ /* We maintain memory alignment to word boundaries by requiring that all
+ allocations be in multiples of the machine wordsize. */
+ /* Size of machine word in bytes. Must be power of 2. */
+ /* Minimum number of bytes requested at a time from the system. Must be multiple of WORDSIZE. */
+
+
+ int remaining; /* Number of bytes left in current block of storage. */
+ void* base; /* Pointer to base of current block of storage. */
+ void* loc; /* Current location in block to next allocate memory. */
+ int blocksize;
+
+
+public:
+ int usedMemory;
+ int wastedMemory;
+
+ /**
+ Default constructor. Initializes a new pool.
+ */
+ PooledAllocator(int blockSize = BLOCKSIZE)
+ {
+ blocksize = blockSize;
+ remaining = 0;
+ base = NULL;
+
+ usedMemory = 0;
+ wastedMemory = 0;
+ }
+
+ /**
+ * Destructor. Frees all the memory allocated in this pool.
+ */
+ ~PooledAllocator()
+ {
+ void* prev;
+
+ while (base != NULL) {
+ prev = *((void**) base); /* Get pointer to prev block. */
+ ::free(base);
+ base = prev;
+ }
+ }
+
+ /**
+ * Returns a pointer to a piece of new memory of the given size in bytes
+ * allocated from the pool.
+ */
+ void* allocateMemory(int size)
+ {
+ int blockSize;
+
+ /* Round size up to a multiple of wordsize. The following expression
+ only works for WORDSIZE that is a power of 2, by masking last bits of
+ incremented size to zero.
+ */
+ size = (size + (WORDSIZE - 1)) & ~(WORDSIZE - 1);
+
+ /* Check whether a new block must be allocated. Note that the first word
+ of a block is reserved for a pointer to the previous block.
+ */
+ if (size > remaining) {
+
+ wastedMemory += remaining;
+
+ /* Allocate new storage. */
+ blockSize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ?
+ size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE;
+
+ // use the standard C malloc to allocate memory
+ void* m = ::malloc(blockSize);
+ if (!m) {
+ fprintf(stderr,"Failed to allocate memory.\n");
+ return NULL;
+ }
+
+ /* Fill first word of new block with pointer to previous block. */
+ ((void**) m)[0] = base;
+ base = m;
+
+ int shift = 0;
+ //int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);
+
+ remaining = blockSize - sizeof(void*) - shift;
+ loc = ((char*)m + sizeof(void*) + shift);
+ }
+ void* rloc = loc;
+ loc = (char*)loc + size;
+ remaining -= size;
+
+ usedMemory += size;
+
+ return rloc;
+ }
+
+ /**
+ * Allocates (using this pool) a generic type T.
+ *
+ * Params:
+ * count = number of instances to allocate.
+ * Returns: pointer (of type T*) to memory buffer
+ */
+ template <typename T>
+ T* allocate(size_t count = 1)
+ {
+ T* mem = (T*) this->allocateMemory((int)(sizeof(T)*count));
+ return mem;
+ }
+
+};
+
+}
+
+#endif //OPENCV_FLANN_ALLOCATOR_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h
new file mode 100644
index 00000000..7e3fd797
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/any.h
@@ -0,0 +1,318 @@
+#ifndef OPENCV_FLANN_ANY_H_
+#define OPENCV_FLANN_ANY_H_
+/*
+ * (C) Copyright Christopher Diggins 2005-2011
+ * (C) Copyright Pablo Aguilar 2005
+ * (C) Copyright Kevlin Henney 2001
+ *
+ * Distributed under the Boost Software License, Version 1.0. (See
+ * accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt
+ *
+ * Adapted for FLANN by Marius Muja
+ */
+
+#include "defines.h"
+#include <stdexcept>
+#include <ostream>
+#include <typeinfo>
+
+namespace cvflann
+{
+
+namespace anyimpl
+{
+
+/* Tag exception type thrown by any::cast() on a type mismatch. */
+struct bad_any_cast
+{
+};
+
+/* Tag type held by a default-constructed (empty) any. */
+struct empty_any
+{
+};
+
+/* Stream inserter for the empty tag: prints a fixed placeholder. */
+inline std::ostream& operator <<(std::ostream& out, const empty_any&)
+{
+ out << "[empty_any]";
+ return out;
+}
+
+/* Abstract per-type policy: the vtable of operations (delete, copy, clone,
+ move, access, print) that struct any dispatches through, instead of being
+ a template itself. One singleton policy exists per stored type (see
+ SinglePolicy below). */
+struct base_any_policy
+{
+ virtual void static_delete(void** x) = 0;
+ virtual void copy_from_value(void const* src, void** dest) = 0;
+ virtual void clone(void* const* src, void** dest) = 0;
+ virtual void move(void* const* src, void** dest) = 0;
+ virtual void* get_value(void** src) = 0;
+ virtual ::size_t get_size() = 0;
+ virtual const std::type_info& type() = 0;
+ virtual void print(std::ostream& out, void* const* src) = 0;
+
+/* The virtual destructor is compiled in only when ABI breakage is allowed.
+ Policies are static singletons and are never deleted polymorphically here,
+ so its absence is benign in this file. */
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~base_any_policy() {}
+#endif
+};
+
+/* Intermediate policy layer: supplies the type-dependent but
+ storage-independent operations (size and type_info) for T. */
+template<typename T>
+struct typed_base_any_policy : base_any_policy
+{
+ virtual ::size_t get_size() { return sizeof(T); }
+ virtual const std::type_info& type() { return typeid(T); }
+
+};
+
+/* Policy for types that fit inside a pointer: the value is constructed
+ in-place in the any's void* slot itself (placement-new at dest), so no
+ heap allocation is needed and static_delete is a no-op. */
+template<typename T>
+struct small_any_policy : typed_base_any_policy<T>
+{
+ virtual void static_delete(void**) { }
+ virtual void copy_from_value(void const* src, void** dest)
+ {
+ new (dest) T(* reinterpret_cast<T const*>(src));
+ }
+ virtual void clone(void* const* src, void** dest) { *dest = *src; }
+ virtual void move(void* const* src, void** dest) { *dest = *src; }
+ /* The "value" is the slot itself, so return the slot's address. */
+ virtual void* get_value(void** src) { return reinterpret_cast<void*>(src); }
+ virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(src); }
+};
+
+/* Policy for types larger than a pointer: the value lives on the heap and
+ the any's void* slot stores a T*. */
+template<typename T>
+struct big_any_policy : typed_base_any_policy<T>
+{
+ virtual void static_delete(void** x)
+ {
+ if (* x) delete (* reinterpret_cast<T**>(x)); *x = NULL;
+ }
+ virtual void copy_from_value(void const* src, void** dest)
+ {
+ *dest = new T(*reinterpret_cast<T const*>(src));
+ }
+ virtual void clone(void* const* src, void** dest)
+ {
+ *dest = new T(**reinterpret_cast<T* const*>(src));
+ }
+ /* NOTE(review): this runs ~T() on the destination and then copy-assigns
+ into the destroyed object — assigning to an object whose destructor
+ has run looks like undefined behavior; newer upstream versions rework
+ move() — confirm before relying on it. */
+ virtual void move(void* const* src, void** dest)
+ {
+ (*reinterpret_cast<T**>(dest))->~T();
+ **reinterpret_cast<T**>(dest) = **reinterpret_cast<T* const*>(src);
+ }
+ virtual void* get_value(void** src) { return *src; }
+ virtual void print(std::ostream& out, void* const* src) { out << *reinterpret_cast<T const*>(*src); }
+};
+
+/* Print specializations for the FLANN enum types: cast to int before
+ streaming, presumably because no operator<< exists for these enums. */
+template<> inline void big_any_policy<flann_centers_init_t>::print(std::ostream& out, void* const* src)
+{
+ out << int(*reinterpret_cast<flann_centers_init_t const*>(*src));
+}
+
+template<> inline void big_any_policy<flann_algorithm_t>::print(std::ostream& out, void* const* src)
+{
+ out << int(*reinterpret_cast<flann_algorithm_t const*>(*src));
+}
+
+/* Selects the storage policy for a type: heap-based by default, in-place
+ for raw pointers (which always fit in the slot) and for the types listed
+ in the SMALL_POLICY block below. */
+template<typename T>
+struct choose_policy
+{
+ typedef big_any_policy<T> type;
+};
+
+template<typename T>
+struct choose_policy<T*>
+{
+ typedef small_any_policy<T*> type;
+};
+
+struct any;
+
+/// Choosing the policy for an any type is illegal, but should never happen.
+/// This is designed to throw a compiler error.
+template<>
+struct choose_policy<any>
+{
+ typedef void type;
+};
+
+/// Specializations for small types.
+#define SMALL_POLICY(TYPE) \
+ template<> \
+ struct choose_policy<TYPE> { typedef small_any_policy<TYPE> type; \
+ }
+
+SMALL_POLICY(signed char);
+SMALL_POLICY(unsigned char);
+SMALL_POLICY(signed short);
+SMALL_POLICY(unsigned short);
+SMALL_POLICY(signed int);
+SMALL_POLICY(unsigned int);
+SMALL_POLICY(signed long);
+SMALL_POLICY(unsigned long);
+SMALL_POLICY(float);
+SMALL_POLICY(bool);
+
+#undef SMALL_POLICY
+
+/* Holder of the one static policy instance per stored type T. Constructors
+ and assignment are private and undefined: the class is never instantiated,
+ only its static get_policy() is used. */
+template <typename T>
+class SinglePolicy
+{
+ SinglePolicy();
+ SinglePolicy(const SinglePolicy& other);
+ SinglePolicy& operator=(const SinglePolicy& other);
+
+public:
+ static base_any_policy* get_policy();
+
+private:
+ static typename choose_policy<T>::type policy;
+};
+
+/* Definition of the per-type singleton policy object. */
+template <typename T>
+typename choose_policy<T>::type SinglePolicy<T>::policy;
+
+/// This function will return a different policy for each type.
+template <typename T>
+inline base_any_policy* SinglePolicy<T>::get_policy() { return &policy; }
+
+} // namespace anyimpl
+
+/* Type-erased value container (boost::any style): holds one value of any
+ type via a per-type policy pointer plus a void* slot. Small types are
+ stored in the slot itself; others are heap-allocated (see the policies
+ above). Retrieval requires the exact original type (cast<T>). */
+struct any
+{
+private:
+ // fields
+ anyimpl::base_any_policy* policy;
+ void* object;
+
+public:
+ /// Initializing constructor.
+ template <typename T>
+ any(const T& x)
+ : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
+ {
+ assign(x);
+ }
+
+ /// Empty constructor.
+ any()
+ : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
+ { }
+
+ /// Special initializing constructor for string literals.
+ any(const char* x)
+ : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
+ {
+ assign(x);
+ }
+
+ /// Copy constructor.
+ any(const any& x)
+ : policy(anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy()), object(NULL)
+ {
+ assign(x);
+ }
+
+ /// Destructor.
+ ~any()
+ {
+ policy->static_delete(&object);
+ }
+
+ /// Assignment function from another any.
+ any& assign(const any& x)
+ {
+ reset();
+ policy = x.policy;
+ policy->clone(&x.object, &object);
+ return *this;
+ }
+
+ /// Assignment function.
+ template <typename T>
+ any& assign(const T& x)
+ {
+ reset();
+ policy = anyimpl::SinglePolicy<T>::get_policy();
+ policy->copy_from_value(&x, &object);
+ return *this;
+ }
+
+ /// Assignment operator.
+ template<typename T>
+ any& operator=(const T& x)
+ {
+ return assign(x);
+ }
+
+ /// Assignment operator, specialed for literal strings.
+ /// They have types like const char [6] which don't work as expected.
+ any& operator=(const char* x)
+ {
+ return assign(x);
+ }
+
+ /// Utility functions
+ any& swap(any& x)
+ {
+ std::swap(policy, x.policy);
+ std::swap(object, x.object);
+ return *this;
+ }
+
+ /// Cast operator. You can only cast to the original type.
+ /// Requires an exact typeid match (no conversions); throws
+ /// anyimpl::bad_any_cast otherwise. The returned reference aliases
+ /// the stored value.
+ template<typename T>
+ T& cast()
+ {
+ if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast();
+ T* r = reinterpret_cast<T*>(policy->get_value(&object));
+ return *r;
+ }
+
+ /// Cast operator. You can only cast to the original type.
+ template<typename T>
+ const T& cast() const
+ {
+ if (policy->type() != typeid(T)) throw anyimpl::bad_any_cast();
+ T* r = reinterpret_cast<T*>(policy->get_value(const_cast<void **>(&object)));
+ return *r;
+ }
+
+ /// Returns true if the any contains no value.
+ bool empty() const
+ {
+ return policy->type() == typeid(anyimpl::empty_any);
+ }
+
+ /// Frees any allocated memory, and sets the value to NULL.
+ void reset()
+ {
+ policy->static_delete(&object);
+ policy = anyimpl::SinglePolicy<anyimpl::empty_any>::get_policy();
+ }
+
+ /// Returns true if the two types are the same.
+ bool compatible(const any& x) const
+ {
+ return policy->type() == x.policy->type();
+ }
+
+ /// Returns if the type is compatible with the policy
+ template<typename T>
+ bool has_type()
+ {
+ return policy->type() == typeid(T);
+ }
+
+ const std::type_info& type() const
+ {
+ return policy->type();
+ }
+
+ friend std::ostream& operator <<(std::ostream& out, const any& any_val);
+};
+
+/* Stream inserter: delegates printing to the stored type's policy. */
+inline std::ostream& operator <<(std::ostream& out, const any& any_val)
+{
+ any_val.policy->print(out,&any_val.object);
+ return out;
+}
+
+}
+
+#endif // OPENCV_FLANN_ANY_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h
new file mode 100644
index 00000000..454641e6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/autotuned_index.h
@@ -0,0 +1,595 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+#ifndef OPENCV_FLANN_AUTOTUNED_INDEX_H_
+#define OPENCV_FLANN_AUTOTUNED_INDEX_H_
+
+#include "general.h"
+#include "nn_index.h"
+#include "ground_truth.h"
+#include "index_testing.h"
+#include "sampling.h"
+#include "kdtree_index.h"
+#include "kdtree_single_index.h"
+#include "kmeans_index.h"
+#include "composite_index.h"
+#include "linear_index.h"
+#include "logger.h"
+
+namespace cvflann
+{
+
+template<typename Distance>
+NNIndex<Distance>* create_index_by_type(const Matrix<typename Distance::ElementType>& dataset, const IndexParams& params, const Distance& distance);
+
+
+/* Parameters controlling the autotuning process: the precision to aim for
+ and the relative weights of build time and memory in the cost function
+ used by AutotunedIndex::estimateBuildParams(). */
+struct AutotunedIndexParams : public IndexParams
+{
+ AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, float memory_weight = 0, float sample_fraction = 0.1)
+ {
+ (*this)["algorithm"] = FLANN_INDEX_AUTOTUNED;
+ // precision desired (used for autotuning, -1 otherwise)
+ (*this)["target_precision"] = target_precision;
+ // build tree time weighting factor
+ (*this)["build_weight"] = build_weight;
+ // index memory weighting factor
+ (*this)["memory_weight"] = memory_weight;
+ // what fraction of the dataset to use for autotuning
+ (*this)["sample_fraction"] = sample_fraction;
+ }
+};
+
+
+/**
+ * Index that autotunes itself: it benchmarks linear search, kd-tree and
+ * k-means indexes on a sample of the dataset, picks the parameter set with
+ * the lowest weighted cost (build time, search time, memory), builds that
+ * index (bestIndex_) and delegates all subsequent operations to it.
+ */
+template <typename Distance>
+class AutotunedIndex : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+ AutotunedIndex(const Matrix<ElementType>& inputData, const IndexParams& params = AutotunedIndexParams(), Distance d = Distance()) :
+ dataset_(inputData), distance_(d)
+ {
+ target_precision_ = get_param(params, "target_precision",0.8f);
+ build_weight_ = get_param(params,"build_weight", 0.01f);
+ memory_weight_ = get_param(params, "memory_weight", 0.0f);
+ sample_fraction_ = get_param(params,"sample_fraction", 0.1f);
+ bestIndex_ = NULL;
+ }
+
+ /* Declared but not defined: disables copying (pre-C++11 noncopyable
+ idiom) since the index owns bestIndex_. */
+ AutotunedIndex(const AutotunedIndex&);
+ AutotunedIndex& operator=(const AutotunedIndex&);
+
+ virtual ~AutotunedIndex()
+ {
+ if (bestIndex_ != NULL) {
+ delete bestIndex_;
+ bestIndex_ = NULL;
+ }
+ }
+
+ /**
+ * Dummy implementation for other algorithms of addable indexes after that.
+ */
+ void addIndex(const Matrix<ElementType>& /*wholeData*/, const Matrix<ElementType>& /*additionalData*/)
+ {
+ }
+
+ /**
+ * Method responsible with building the index.
+ * Runs the full autotune: estimates the best build parameters, builds
+ * the chosen index, then estimates the best search parameters for it.
+ */
+ virtual void buildIndex()
+ {
+ std::ostringstream stream;
+ bestParams_ = estimateBuildParams();
+ print_params(bestParams_, stream);
+ Logger::info("----------------------------------------------------\n");
+ Logger::info("Autotuned parameters:\n");
+ Logger::info("%s", stream.str().c_str());
+ Logger::info("----------------------------------------------------\n");
+
+ bestIndex_ = create_index_by_type(dataset_, bestParams_, distance_);
+ bestIndex_->buildIndex();
+ speedup_ = estimateSearchParams(bestSearchParams_);
+ stream.str(std::string());
+ print_params(bestSearchParams_, stream);
+ Logger::info("----------------------------------------------------\n");
+ Logger::info("Search parameters:\n");
+ Logger::info("%s", stream.str().c_str());
+ Logger::info("----------------------------------------------------\n");
+ }
+
+ /**
+ * Saves the index to a stream
+ * (type tag of the chosen index, its data, then the tuned check count).
+ */
+ virtual void saveIndex(FILE* stream)
+ {
+ save_value(stream, (int)bestIndex_->getType());
+ bestIndex_->saveIndex(stream);
+ save_value(stream, get_param<int>(bestSearchParams_, "checks"));
+ }
+
+ /**
+ * Loads the index from a stream
+ */
+ virtual void loadIndex(FILE* stream)
+ {
+ int index_type;
+
+ load_value(stream, index_type);
+ IndexParams params;
+ params["algorithm"] = (flann_algorithm_t)index_type;
+ bestIndex_ = create_index_by_type<Distance>(dataset_, params, distance_);
+ bestIndex_->loadIndex(stream);
+ int checks;
+ load_value(stream, checks);
+ bestSearchParams_["checks"] = checks;
+ }
+
+ /**
+ * Method that searches for nearest-neighbors
+ * Uses the tuned search parameters unless the caller supplies an
+ * explicit check count.
+ */
+ virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+ {
+ int checks = get_param<int>(searchParams,"checks",FLANN_CHECKS_AUTOTUNED);
+ if (checks == FLANN_CHECKS_AUTOTUNED) {
+ bestIndex_->findNeighbors(result, vec, bestSearchParams_);
+ }
+ else {
+ bestIndex_->findNeighbors(result, vec, searchParams);
+ }
+ }
+
+
+ IndexParams getParameters() const
+ {
+ return bestIndex_->getParameters();
+ }
+
+ SearchParams getSearchParameters() const
+ {
+ return bestSearchParams_;
+ }
+
+ /* Speedup over linear search measured by estimateSearchParams(). */
+ float getSpeedup() const
+ {
+ return speedup_;
+ }
+
+
+ /**
+ * Number of features in this index.
+ */
+ virtual size_t size() const
+ {
+ return bestIndex_->size();
+ }
+
+ /**
+ * The length of each vector in this index.
+ */
+ virtual size_t veclen() const
+ {
+ return bestIndex_->veclen();
+ }
+
+ /**
+ * The amount of memory (in bytes) this index uses.
+ */
+ virtual int usedMemory() const
+ {
+ return bestIndex_->usedMemory();
+ }
+
+ /**
+ * Algorithm name
+ */
+ virtual flann_algorithm_t getType() const
+ {
+ return FLANN_INDEX_AUTOTUNED;
+ }
+
+private:
+
+ /* Per-candidate benchmark results used while exploring the parameter
+ space. */
+ struct CostData
+ {
+ float searchTimeCost;
+ float buildTimeCost;
+ float memoryCost;
+ float totalCost;
+ IndexParams params;
+ };
+
+ void evaluate_kmeans(CostData& cost)
+ {
+ StartStopTimer t;
+ /* 'checks' is filled in by test_index_precision below (number of
+ checks needed to reach the target precision) — presumably an
+ out-parameter; confirm against index_testing.h. */
+ int checks;
+ const int nn = 1;
+
+ Logger::info("KMeansTree using params: max_iterations=%d, branching=%d\n",
+ get_param<int>(cost.params,"iterations"),
+ get_param<int>(cost.params,"branching"));
+ KMeansIndex<Distance> kmeans(sampledDataset_, cost.params, distance_);
+ // measure index build time
+ t.start();
+ kmeans.buildIndex();
+ t.stop();
+ float buildTime = (float)t.value;
+
+ // measure search time
+ float searchTime = test_index_precision(kmeans, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn);
+
+ float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float));
+ cost.memoryCost = (kmeans.usedMemory() + datasetMemory) / datasetMemory;
+ cost.searchTimeCost = searchTime;
+ cost.buildTimeCost = buildTime;
+ Logger::info("KMeansTree buildTime=%g, searchTime=%g, build_weight=%g\n", buildTime, searchTime, build_weight_);
+ }
+
+
+ void evaluate_kdtree(CostData& cost)
+ {
+ StartStopTimer t;
+ int checks;
+ const int nn = 1;
+
+ Logger::info("KDTree using params: trees=%d\n", get_param<int>(cost.params,"trees"));
+ KDTreeIndex<Distance> kdtree(sampledDataset_, cost.params, distance_);
+
+ t.start();
+ kdtree.buildIndex();
+ t.stop();
+ float buildTime = (float)t.value;
+
+ //measure search time
+ float searchTime = test_index_precision(kdtree, sampledDataset_, testDataset_, gt_matches_, target_precision_, checks, distance_, nn);
+
+ float datasetMemory = float(sampledDataset_.rows * sampledDataset_.cols * sizeof(float));
+ cost.memoryCost = (kdtree.usedMemory() + datasetMemory) / datasetMemory;
+ cost.searchTimeCost = searchTime;
+ cost.buildTimeCost = buildTime;
+ Logger::info("KDTree buildTime=%g, searchTime=%g\n", buildTime, searchTime);
+ }
+
+
+ // struct KMeansSimpleDownhillFunctor {
+ //
+ // Autotune& autotuner;
+ // KMeansSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {};
+ //
+ // float operator()(int* params) {
+ //
+ // float maxFloat = numeric_limits<float>::max();
+ //
+ // if (params[0]<2) return maxFloat;
+ // if (params[1]<0) return maxFloat;
+ //
+ // CostData c;
+ // c.params["algorithm"] = KMEANS;
+ // c.params["centers-init"] = CENTERS_RANDOM;
+ // c.params["branching"] = params[0];
+ // c.params["max-iterations"] = params[1];
+ //
+ // autotuner.evaluate_kmeans(c);
+ //
+ // return c.timeCost;
+ //
+ // }
+ // };
+ //
+ // struct KDTreeSimpleDownhillFunctor {
+ //
+ // Autotune& autotuner;
+ // KDTreeSimpleDownhillFunctor(Autotune& autotuner_) : autotuner(autotuner_) {};
+ //
+ // float operator()(int* params) {
+ // float maxFloat = numeric_limits<float>::max();
+ //
+ // if (params[0]<1) return maxFloat;
+ //
+ // CostData c;
+ // c.params["algorithm"] = KDTREE;
+ // c.params["trees"] = params[0];
+ //
+ // autotuner.evaluate_kdtree(c);
+ //
+ // return c.timeCost;
+ //
+ // }
+ // };
+
+
+
+ void optimizeKMeans(std::vector<CostData>& costs)
+ {
+ Logger::info("KMEANS, Step 1: Exploring parameter space\n");
+
+ // explore kmeans parameters space using combinations of the parameters below
+ int maxIterations[] = { 1, 5, 10, 15 };
+ int branchingFactors[] = { 16, 32, 64, 128, 256 };
+
+ int kmeansParamSpaceSize = FLANN_ARRAY_LEN(maxIterations) * FLANN_ARRAY_LEN(branchingFactors);
+ costs.reserve(costs.size() + kmeansParamSpaceSize);
+
+ // evaluate kmeans for all parameter combinations
+ for (size_t i = 0; i < FLANN_ARRAY_LEN(maxIterations); ++i) {
+ for (size_t j = 0; j < FLANN_ARRAY_LEN(branchingFactors); ++j) {
+ CostData cost;
+ cost.params["algorithm"] = FLANN_INDEX_KMEANS;
+ cost.params["centers_init"] = FLANN_CENTERS_RANDOM;
+ cost.params["iterations"] = maxIterations[i];
+ cost.params["branching"] = branchingFactors[j];
+
+ evaluate_kmeans(cost);
+ costs.push_back(cost);
+ }
+ }
+
+ // Logger::info("KMEANS, Step 2: simplex-downhill optimization\n");
+ //
+ // const int n = 2;
+ // // choose initial simplex points as the best parameters so far
+ // int kmeansNMPoints[n*(n+1)];
+ // float kmeansVals[n+1];
+ // for (int i=0;i<n+1;++i) {
+ // kmeansNMPoints[i*n] = (int)kmeansCosts[i].params["branching"];
+ // kmeansNMPoints[i*n+1] = (int)kmeansCosts[i].params["max-iterations"];
+ // kmeansVals[i] = kmeansCosts[i].timeCost;
+ // }
+ // KMeansSimpleDownhillFunctor kmeans_cost_func(*this);
+ // // run optimization
+ // optimizeSimplexDownhill(kmeansNMPoints,n,kmeans_cost_func,kmeansVals);
+ // // store results
+ // for (int i=0;i<n+1;++i) {
+ // kmeansCosts[i].params["branching"] = kmeansNMPoints[i*2];
+ // kmeansCosts[i].params["max-iterations"] = kmeansNMPoints[i*2+1];
+ // kmeansCosts[i].timeCost = kmeansVals[i];
+ // }
+ }
+
+
+ void optimizeKDTree(std::vector<CostData>& costs)
+ {
+ Logger::info("KD-TREE, Step 1: Exploring parameter space\n");
+
+ // explore kd-tree parameters space using the parameters below
+ int testTrees[] = { 1, 4, 8, 16, 32 };
+
+ // evaluate kdtree for all parameter combinations
+ for (size_t i = 0; i < FLANN_ARRAY_LEN(testTrees); ++i) {
+ CostData cost;
+ cost.params["algorithm"] = FLANN_INDEX_KDTREE;
+ cost.params["trees"] = testTrees[i];
+
+ evaluate_kdtree(cost);
+ costs.push_back(cost);
+ }
+
+ // Logger::info("KD-TREE, Step 2: simplex-downhill optimization\n");
+ //
+ // const int n = 1;
+ // // choose initial simplex points as the best parameters so far
+ // int kdtreeNMPoints[n*(n+1)];
+ // float kdtreeVals[n+1];
+ // for (int i=0;i<n+1;++i) {
+ // kdtreeNMPoints[i] = (int)kdtreeCosts[i].params["trees"];
+ // kdtreeVals[i] = kdtreeCosts[i].timeCost;
+ // }
+ // KDTreeSimpleDownhillFunctor kdtree_cost_func(*this);
+ // // run optimization
+ // optimizeSimplexDownhill(kdtreeNMPoints,n,kdtree_cost_func,kdtreeVals);
+ // // store results
+ // for (int i=0;i<n+1;++i) {
+ // kdtreeCosts[i].params["trees"] = kdtreeNMPoints[i];
+ // kdtreeCosts[i].timeCost = kdtreeVals[i];
+ // }
+ }
+
+ /**
+ * Chooses the best nearest-neighbor algorithm and estimates the optimal
+ * parameters to use when building the index (for a given precision).
+ * Returns a dictionary with the optimal parameters.
+ */
+ IndexParams estimateBuildParams()
+ {
+ std::vector<CostData> costs;
+
+ int sampleSize = int(sample_fraction_ * dataset_.rows);
+ int testSampleSize = std::min(sampleSize / 10, 1000);
+
+ Logger::info("Entering autotuning, dataset size: %d, sampleSize: %d, testSampleSize: %d, target precision: %g\n", dataset_.rows, sampleSize, testSampleSize, target_precision_);
+
+ // For a very small dataset, it makes no sense to build any fancy index, just
+ // use linear search
+ if (testSampleSize < 10) {
+ Logger::info("Choosing linear, dataset too small\n");
+ return LinearIndexParams();
+ }
+
+ // We use a fraction of the original dataset to speedup the autotune algorithm
+ sampledDataset_ = random_sample(dataset_, sampleSize);
+ // We use a cross-validation approach, first we sample a testset from the dataset
+ testDataset_ = random_sample(sampledDataset_, testSampleSize, true);
+
+ // We compute the ground truth using linear search
+ Logger::info("Computing ground truth... \n");
+ gt_matches_ = Matrix<int>(new int[testDataset_.rows], testDataset_.rows, 1);
+ StartStopTimer t;
+ t.start();
+ compute_ground_truth<Distance>(sampledDataset_, testDataset_, gt_matches_, 0, distance_);
+ t.stop();
+
+ /* Linear search is the baseline candidate: no build time, no extra
+ memory, its search cost is the ground-truth timing above. */
+ CostData linear_cost;
+ linear_cost.searchTimeCost = (float)t.value;
+ linear_cost.buildTimeCost = 0;
+ linear_cost.memoryCost = 0;
+ linear_cost.params["algorithm"] = FLANN_INDEX_LINEAR;
+
+ costs.push_back(linear_cost);
+
+ // Start parameter autotune process
+ Logger::info("Autotuning parameters...\n");
+
+ optimizeKMeans(costs);
+ optimizeKDTree(costs);
+
+ /* First pass: find the lowest weighted time cost
+ (build * build_weight + search) across all candidates. */
+ float bestTimeCost = costs[0].searchTimeCost;
+ for (size_t i = 0; i < costs.size(); ++i) {
+ float timeCost = costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost;
+ if (timeCost < bestTimeCost) {
+ bestTimeCost = timeCost;
+ }
+ }
+
+ /* Second pass: normalize each candidate's time cost by the best one,
+ add the weighted memory cost, and keep the overall cheapest. */
+ float bestCost = costs[0].searchTimeCost / bestTimeCost;
+ IndexParams bestParams = costs[0].params;
+ if (bestTimeCost > 0) {
+ for (size_t i = 0; i < costs.size(); ++i) {
+ float crtCost = (costs[i].buildTimeCost * build_weight_ + costs[i].searchTimeCost) / bestTimeCost +
+ memory_weight_ * costs[i].memoryCost;
+ if (crtCost < bestCost) {
+ bestCost = crtCost;
+ bestParams = costs[i].params;
+ }
+ }
+ }
+
+ /* Release the temporary benchmark datasets. */
+ delete[] gt_matches_.data;
+ delete[] testDataset_.data;
+ delete[] sampledDataset_.data;
+
+ return bestParams;
+ }
+
+
+
+ /**
+ * Estimates the search time parameters needed to get the desired precision.
+ * Precondition: the index is built
+ * Postcondition: the searchParams will have the optimum params set, also the speedup obtained over linear search.
+ */
+ float estimateSearchParams(SearchParams& searchParams)
+ {
+ const int nn = 1;
+ const size_t SAMPLE_COUNT = 1000;
+
+ assert(bestIndex_ != NULL); // must have a valid index
+
+ float speedup = 0;
+
+ int samples = (int)std::min(dataset_.rows / 10, SAMPLE_COUNT);
+ if (samples > 0) {
+ Matrix<ElementType> testDataset = random_sample(dataset_, samples);
+
+ Logger::info("Computing ground truth\n");
+
+ // we need to compute the ground truth first
+ Matrix<int> gt_matches(new int[testDataset.rows], testDataset.rows, 1);
+ StartStopTimer t;
+ t.start();
+ compute_ground_truth<Distance>(dataset_, testDataset, gt_matches, 1, distance_);
+ t.stop();
+ float linear = (float)t.value;
+
+ int checks;
+ Logger::info("Estimating number of checks\n");
+
+ float searchTime;
+ float cb_index;
+ if (bestIndex_->getType() == FLANN_INDEX_KMEANS) {
+ Logger::info("KMeans algorithm, estimating cluster border factor\n");
+ KMeansIndex<Distance>* kmeans = (KMeansIndex<Distance>*)bestIndex_;
+ float bestSearchTime = -1;
+ float best_cb_index = -1;
+ int best_checks = -1;
+ /* Sweep the cluster-border factor over [0, 1] in 0.2 steps and
+ keep the value with the fastest search at target precision. */
+ for (cb_index = 0; cb_index < 1.1f; cb_index += 0.2f) {
+ kmeans->set_cb_index(cb_index);
+ searchTime = test_index_precision(*kmeans, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1);
+ if ((searchTime < bestSearchTime) || (bestSearchTime == -1)) {
+ bestSearchTime = searchTime;
+ best_cb_index = cb_index;
+ best_checks = checks;
+ }
+ }
+ searchTime = bestSearchTime;
+ cb_index = best_cb_index;
+ checks = best_checks;
+
+ kmeans->set_cb_index(best_cb_index);
+ Logger::info("Optimum cb_index: %g\n", cb_index);
+ bestParams_["cb_index"] = cb_index;
+ }
+ else {
+ searchTime = test_index_precision(*bestIndex_, dataset_, testDataset, gt_matches, target_precision_, checks, distance_, nn, 1);
+ }
+
+ Logger::info("Required number of checks: %d \n", checks);
+ searchParams["checks"] = checks;
+
+ speedup = linear / searchTime;
+
+ delete[] gt_matches.data;
+ delete[] testDataset.data;
+ }
+
+ return speedup;
+ }
+
+private:
+ NNIndex<Distance>* bestIndex_;
+
+ IndexParams bestParams_;
+ SearchParams bestSearchParams_;
+
+ Matrix<ElementType> sampledDataset_;
+ Matrix<ElementType> testDataset_;
+ Matrix<int> gt_matches_;
+
+ float speedup_;
+
+ /**
+ * The dataset used by this index
+ */
+ const Matrix<ElementType> dataset_;
+
+ /**
+ * Index parameters
+ */
+ float target_precision_;
+ float build_weight_;
+ float memory_weight_;
+ float sample_fraction_;
+
+ Distance distance_;
+
+
+};
+}
+
+#endif /* OPENCV_FLANN_AUTOTUNED_INDEX_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h
new file mode 100644
index 00000000..02b7bc1f
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/composite_index.h
@@ -0,0 +1,201 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_COMPOSITE_INDEX_H_
+#define OPENCV_FLANN_COMPOSITE_INDEX_H_
+
+#include "general.h"
+#include "nn_index.h"
+#include "kdtree_index.h"
+#include "kmeans_index.h"
+
+namespace cvflann
+{
+
+/**
+ * Index parameters for the CompositeIndex.
+ */
+struct CompositeIndexParams : public IndexParams
+{
+ CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11,
+ flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 )
+ {
+ (*this)["algorithm"] = FLANN_INDEX_KMEANS;
+ // number of randomized trees to use (for kdtree)
+ (*this)["trees"] = trees;
+ // branching factor
+ (*this)["branching"] = branching;
+ // max iterations to perform in one kmeans clustering (kmeans tree)
+ (*this)["iterations"] = iterations;
+ // algorithm used for picking the initial cluster centers for kmeans tree
+ (*this)["centers_init"] = centers_init;
+ // cluster boundary index. Used when searching the kmeans tree
+ (*this)["cb_index"] = cb_index;
+ }
+};
+
+
+/**
+ * This index builds a kd-tree index and a k-means index and performs nearest
+ * neighbour search both indexes. This gives a slight boost in search performance
+ * as some of the neighbours that are missed by one index are found by the other.
+ */
+template <typename Distance>
+class CompositeIndex : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+ /**
+ * Index constructor
+ * @param inputData dataset containing the points to index
+ * @param params Index parameters
+ * @param d Distance functor
+ * @return
+ */
+ CompositeIndex(const Matrix<ElementType>& inputData, const IndexParams& params = CompositeIndexParams(),
+ Distance d = Distance()) : index_params_(params)
+ {
+ kdtree_index_ = new KDTreeIndex<Distance>(inputData, params, d);
+ kmeans_index_ = new KMeansIndex<Distance>(inputData, params, d);
+
+ }
+
+ /* Declared but not defined: disables copying (pre-C++11 noncopyable
+ idiom), since this index owns the two sub-indexes. */
+ CompositeIndex(const CompositeIndex&);
+ CompositeIndex& operator=(const CompositeIndex&);
+
+ virtual ~CompositeIndex()
+ {
+ delete kdtree_index_;
+ delete kmeans_index_;
+ }
+
+ /**
+ * @return The index type
+ */
+ flann_algorithm_t getType() const
+ {
+ return FLANN_INDEX_COMPOSITE;
+ }
+
+ /**
+ * @return Size of the index
+ * (both sub-indexes hold the same dataset, so the kd-tree's size is
+ * reported)
+ */
+ size_t size() const
+ {
+ return kdtree_index_->size();
+ }
+
+ /**
+ * \returns The dimensionality of the features in this index.
+ */
+ size_t veclen() const
+ {
+ return kdtree_index_->veclen();
+ }
+
+ /**
+ * \returns The amount of memory (in bytes) used by the index.
+ */
+ int usedMemory() const
+ {
+ return kmeans_index_->usedMemory() + kdtree_index_->usedMemory();
+ }
+
+ /**
+ * Dummy implementation for other algorithms of addable indexes after that.
+ */
+ void addIndex(const Matrix<ElementType>& /*wholeData*/, const Matrix<ElementType>& /*additionalData*/)
+ {
+ }
+
+ /**
+ * \brief Builds the index
+ */
+ void buildIndex()
+ {
+ Logger::info("Building kmeans tree...\n");
+ kmeans_index_->buildIndex();
+ Logger::info("Building kdtree tree...\n");
+ kdtree_index_->buildIndex();
+ }
+
+ /**
+ * \brief Saves the index to a stream
+ * \param stream The stream to save the index to
+ * (kmeans first, then kdtree — loadIndex must read in the same order)
+ */
+ void saveIndex(FILE* stream)
+ {
+ kmeans_index_->saveIndex(stream);
+ kdtree_index_->saveIndex(stream);
+ }
+
+ /**
+ * \brief Loads the index from a stream
+ * \param stream The stream from which the index is loaded
+ */
+ void loadIndex(FILE* stream)
+ {
+ kmeans_index_->loadIndex(stream);
+ kdtree_index_->loadIndex(stream);
+ }
+
+ /**
+ * \returns The index parameters
+ */
+ IndexParams getParameters() const
+ {
+ return index_params_;
+ }
+
+ /**
+ * \brief Method that searches for nearest-neighbours
+ * Both sub-indexes add candidates to the same result set, so
+ * neighbours missed by one index can be found by the other.
+ */
+ void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+ {
+ kmeans_index_->findNeighbors(result, vec, searchParams);
+ kdtree_index_->findNeighbors(result, vec, searchParams);
+ }
+
+private:
+ /** The k-means index */
+ KMeansIndex<Distance>* kmeans_index_;
+
+ /** The kd-tree index */
+ KDTreeIndex<Distance>* kdtree_index_;
+
+ /** The index parameters */
+ const IndexParams index_params_;
+};
+
+}
+
+#endif //OPENCV_FLANN_COMPOSITE_INDEX_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h
new file mode 100644
index 00000000..56832fd3
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/config.h
@@ -0,0 +1,38 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_CONFIG_H_
+#define OPENCV_FLANN_CONFIG_H_
+
+#ifdef FLANN_VERSION_
+#undef FLANN_VERSION_
+#endif
+#define FLANN_VERSION_ "1.6.10"
+
+#endif /* OPENCV_FLANN_CONFIG_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h
new file mode 100644
index 00000000..13833b3c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/defines.h
@@ -0,0 +1,176 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_DEFINES_H_
+#define OPENCV_FLANN_DEFINES_H_
+
+#include "config.h"
+
+#ifdef FLANN_EXPORT
+#undef FLANN_EXPORT
+#endif
+#ifdef WIN32
+/* win32 dll export/import directives */
+ #ifdef FLANN_EXPORTS
+ #define FLANN_EXPORT __declspec(dllexport)
+ #elif defined(FLANN_STATIC)
+ #define FLANN_EXPORT
+ #else
+ #define FLANN_EXPORT __declspec(dllimport)
+ #endif
+#else
+/* unix needs nothing */
+ #define FLANN_EXPORT
+#endif
+
+
+#ifdef FLANN_DEPRECATED
+#undef FLANN_DEPRECATED
+#endif
+#ifdef __GNUC__
+#define FLANN_DEPRECATED __attribute__ ((deprecated))
+#elif defined(_MSC_VER)
+#define FLANN_DEPRECATED __declspec(deprecated)
+#else
+#pragma message("WARNING: You need to implement FLANN_DEPRECATED for this compiler")
+#define FLANN_DEPRECATED
+#endif
+
+
+#undef FLANN_PLATFORM_32_BIT
+#undef FLANN_PLATFORM_64_BIT
+#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64
+#define FLANN_PLATFORM_64_BIT
+#else
+#define FLANN_PLATFORM_32_BIT
+#endif
+
+
+#undef FLANN_ARRAY_LEN
+#define FLANN_ARRAY_LEN(a) (sizeof(a)/sizeof(a[0]))
+
namespace cvflann {

/** Nearest-neighbour index algorithms. */
enum flann_algorithm_t
{
    FLANN_INDEX_LINEAR = 0,
    FLANN_INDEX_KDTREE = 1,
    FLANN_INDEX_KMEANS = 2,
    FLANN_INDEX_COMPOSITE = 3,
    FLANN_INDEX_KDTREE_SINGLE = 4,
    FLANN_INDEX_HIERARCHICAL = 5,
    FLANN_INDEX_LSH = 6,
    FLANN_INDEX_SAVED = 254,
    FLANN_INDEX_AUTOTUNED = 255,

    // Deprecated aliases; use the FLANN_INDEX_* names instead.
    LINEAR = 0,
    KDTREE = 1,
    KMEANS = 2,
    COMPOSITE = 3,
    KDTREE_SINGLE = 4,
    SAVED = 254,
    AUTOTUNED = 255
};

/** Strategies for picking initial cluster centers in k-means. */
enum flann_centers_init_t
{
    FLANN_CENTERS_RANDOM = 0,
    FLANN_CENTERS_GONZALES = 1,
    FLANN_CENTERS_KMEANSPP = 2,

    // Deprecated aliases; use the FLANN_CENTERS_* names instead.
    CENTERS_RANDOM = 0,
    CENTERS_GONZALES = 1,
    CENTERS_KMEANSPP = 2
};

/** Logging verbosity levels, from silent (NONE) to most verbose (INFO). */
enum flann_log_level_t
{
    FLANN_LOG_NONE = 0,
    FLANN_LOG_FATAL = 1,
    FLANN_LOG_ERROR = 2,
    FLANN_LOG_WARN = 3,
    FLANN_LOG_INFO = 4
};

/** Supported distance metrics; note the synonym pairs sharing a value. */
enum flann_distance_t
{
    FLANN_DIST_EUCLIDEAN = 1,
    FLANN_DIST_L2 = 1,
    FLANN_DIST_MANHATTAN = 2,
    FLANN_DIST_L1 = 2,
    FLANN_DIST_MINKOWSKI = 3,
    FLANN_DIST_MAX = 4,
    FLANN_DIST_HIST_INTERSECT = 5,
    FLANN_DIST_HELLINGER = 6,
    FLANN_DIST_CHI_SQUARE = 7,
    FLANN_DIST_CS = 7,
    FLANN_DIST_KULLBACK_LEIBLER = 8,
    FLANN_DIST_KL = 8,
    FLANN_DIST_HAMMING = 9,

    // Deprecated aliases; use the FLANN_DIST_* names instead.
    EUCLIDEAN = 1,
    MANHATTAN = 2,
    MINKOWSKI = 3,
    MAX_DIST = 4,
    HIST_INTERSECT = 5,
    HELLINGER = 6,
    CS = 7,
    KL = 8,
    KULLBACK_LEIBLER = 8
};

/** Element-type tags used when (de)serializing datasets. */
enum flann_datatype_t
{
    FLANN_INT8 = 0,
    FLANN_INT16 = 1,
    FLANN_INT32 = 2,
    FLANN_INT64 = 3,
    FLANN_UINT8 = 4,
    FLANN_UINT16 = 5,
    FLANN_UINT32 = 6,
    FLANN_UINT64 = 7,
    FLANN_FLOAT32 = 8,
    FLANN_FLOAT64 = 9
};

/** Special values for the search-time "checks" parameter. */
enum
{
    FLANN_CHECKS_UNLIMITED = -1,
    FLANN_CHECKS_AUTOTUNED = -2
};

}
+
+#endif /* OPENCV_FLANN_DEFINES_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h
new file mode 100644
index 00000000..5ba3d345
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dist.h
@@ -0,0 +1,937 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_DIST_H_
+#define OPENCV_FLANN_DIST_H_
+
+#include <cmath>
+#include <cstdlib>
+#include <string.h>
+#ifdef _MSC_VER
+typedef unsigned __int32 uint32_t;
+typedef unsigned __int64 uint64_t;
+#else
+#include <stdint.h>
+#endif
+
+#include "defines.h"
+
+#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
+# include <Intrin.h>
+#endif
+
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+# include "arm_neon.h"
+#endif
+
+namespace cvflann
+{
+
/** Generic absolute value; specialized below to call the libc routines. */
template<typename T>
inline T abs(T x)
{
    if (x < 0) return -x;
    return x;
}

template<>
inline int abs<int>(int x) { return ::abs(x); }

template<>
inline float abs<float>(float x) { return fabsf(x); }

template<>
inline double abs<double>(double x) { return fabs(x); }
+
/**
 * Accumulator<T>::Type is the type used to accumulate distance results
 * computed over elements of type T.  Narrow integral element types promote
 * to float so partial sums do not overflow; any other type accumulates in
 * itself (the primary template).
 */
template<typename T>
struct Accumulator { typedef T Type; };

// Narrow integral element types accumulate in float.
template<>
struct Accumulator<char> { typedef float Type; };
template<>
struct Accumulator<unsigned char> { typedef float Type; };
template<>
struct Accumulator<short> { typedef float Type; };
template<>
struct Accumulator<unsigned short> { typedef float Type; };
template<>
struct Accumulator<int> { typedef float Type; };
template<>
struct Accumulator<unsigned int> { typedef float Type; };

// Some headers (e.g. X11) define True/False as macros; neutralize them so
// the tag types below can use those names.
#undef True
#undef False

/** Compile-time "true" tag used by the distance functors' trait typedefs. */
class True
{
};

/** Compile-time "false" tag used by the distance functors' trait typedefs. */
class False
{
};
+
+
+/**
+ * Squared Euclidean distance functor.
+ *
+ * This is the simpler, unrolled version. This is preferable for
+ * very low dimensionality data (eg 3D points)
+ */
+template<class T>
+struct L2_Simple
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType diff;
+ for(size_t i = 0; i < size; ++i ) {
+ diff = *a++ - *b++;
+ result += diff*diff;
+ }
+ return result;
+ }
+
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ return (a-b)*(a-b);
+ }
+};
+
+
+
+/**
+ * Squared Euclidean distance functor, optimized version
+ */
+template<class T>
+struct L2
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ /**
+ * Compute the squared Euclidean distance between two vectors.
+ *
+ * This is highly optimised, with loop unrolling, as it is one
+ * of the most expensive inner loops.
+ *
+ * The computation of squared root at the end is omitted for
+ * efficiency.
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType diff0, diff1, diff2, diff3;
+ Iterator1 last = a + size;
+ Iterator1 lastgroup = last - 3;
+
+ /* Process 4 items with each loop for efficiency. */
+ while (a < lastgroup) {
+ diff0 = (ResultType)(a[0] - b[0]);
+ diff1 = (ResultType)(a[1] - b[1]);
+ diff2 = (ResultType)(a[2] - b[2]);
+ diff3 = (ResultType)(a[3] - b[3]);
+ result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
+ a += 4;
+ b += 4;
+
+ if ((worst_dist>0)&&(result>worst_dist)) {
+ return result;
+ }
+ }
+ /* Process last 0-3 pixels. Not needed for standard vector lengths. */
+ while (a < last) {
+ diff0 = (ResultType)(*a++ - *b++);
+ result += diff0 * diff0;
+ }
+ return result;
+ }
+
+ /**
+ * Partial euclidean distance, using just one dimension. This is used by the
+ * kd-tree when computing partial distances while traversing the tree.
+ *
+ * Squared root is omitted for efficiency.
+ */
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ return (a-b)*(a-b);
+ }
+};
+
+
+/*
+ * Manhattan distance functor, optimized version
+ */
+template<class T>
+struct L1
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ /**
+ * Compute the Manhattan (L_1) distance between two vectors.
+ *
+ * This is highly optimised, with loop unrolling, as it is one
+ * of the most expensive inner loops.
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType diff0, diff1, diff2, diff3;
+ Iterator1 last = a + size;
+ Iterator1 lastgroup = last - 3;
+
+ /* Process 4 items with each loop for efficiency. */
+ while (a < lastgroup) {
+ diff0 = (ResultType)abs(a[0] - b[0]);
+ diff1 = (ResultType)abs(a[1] - b[1]);
+ diff2 = (ResultType)abs(a[2] - b[2]);
+ diff3 = (ResultType)abs(a[3] - b[3]);
+ result += diff0 + diff1 + diff2 + diff3;
+ a += 4;
+ b += 4;
+
+ if ((worst_dist>0)&&(result>worst_dist)) {
+ return result;
+ }
+ }
+ /* Process last 0-3 pixels. Not needed for standard vector lengths. */
+ while (a < last) {
+ diff0 = (ResultType)abs(*a++ - *b++);
+ result += diff0;
+ }
+ return result;
+ }
+
+ /**
+ * Partial distance, used by the kd-tree.
+ */
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ return abs(a-b);
+ }
+};
+
+
+
+template<class T>
+struct MinkowskiDistance
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ int order;
+
+ MinkowskiDistance(int order_) : order(order_) {}
+
+ /**
+ * Compute the Minkowsky (L_p) distance between two vectors.
+ *
+ * This is highly optimised, with loop unrolling, as it is one
+ * of the most expensive inner loops.
+ *
+ * The computation of squared root at the end is omitted for
+ * efficiency.
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType diff0, diff1, diff2, diff3;
+ Iterator1 last = a + size;
+ Iterator1 lastgroup = last - 3;
+
+ /* Process 4 items with each loop for efficiency. */
+ while (a < lastgroup) {
+ diff0 = (ResultType)abs(a[0] - b[0]);
+ diff1 = (ResultType)abs(a[1] - b[1]);
+ diff2 = (ResultType)abs(a[2] - b[2]);
+ diff3 = (ResultType)abs(a[3] - b[3]);
+ result += pow(diff0,order) + pow(diff1,order) + pow(diff2,order) + pow(diff3,order);
+ a += 4;
+ b += 4;
+
+ if ((worst_dist>0)&&(result>worst_dist)) {
+ return result;
+ }
+ }
+ /* Process last 0-3 pixels. Not needed for standard vector lengths. */
+ while (a < last) {
+ diff0 = (ResultType)abs(*a++ - *b++);
+ result += pow(diff0,order);
+ }
+ return result;
+ }
+
+ /**
+ * Partial distance, used by the kd-tree.
+ */
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ return pow(static_cast<ResultType>(abs(a-b)),order);
+ }
+};
+
+
+
+template<class T>
+struct MaxDistance
+{
+ typedef False is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ /**
+ * Compute the max distance (L_infinity) between two vectors.
+ *
+ * This distance is not a valid kdtree distance, it's not dimensionwise additive.
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType diff0, diff1, diff2, diff3;
+ Iterator1 last = a + size;
+ Iterator1 lastgroup = last - 3;
+
+ /* Process 4 items with each loop for efficiency. */
+ while (a < lastgroup) {
+ diff0 = abs(a[0] - b[0]);
+ diff1 = abs(a[1] - b[1]);
+ diff2 = abs(a[2] - b[2]);
+ diff3 = abs(a[3] - b[3]);
+ if (diff0>result) {result = diff0; }
+ if (diff1>result) {result = diff1; }
+ if (diff2>result) {result = diff2; }
+ if (diff3>result) {result = diff3; }
+ a += 4;
+ b += 4;
+
+ if ((worst_dist>0)&&(result>worst_dist)) {
+ return result;
+ }
+ }
+ /* Process last 0-3 pixels. Not needed for standard vector lengths. */
+ while (a < last) {
+ diff0 = abs(*a++ - *b++);
+ result = (diff0>result) ? diff0 : result;
+ }
+ return result;
+ }
+
+ /* This distance functor is not dimension-wise additive, which
+ * makes it an invalid kd-tree distance, not implementing the accum_dist method */
+
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/**
+ * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
+ * bit count of A exclusive XOR'ed with B
+ */
+struct HammingLUT
+{
+ typedef False is_kdtree_distance;
+ typedef False is_vector_space_distance;
+
+ typedef unsigned char ElementType;
+ typedef int ResultType;
+
+ /** this will count the bits in a ^ b
+ */
+ ResultType operator()(const unsigned char* a, const unsigned char* b, int size) const
+ {
+ static const uchar popCountTable[] =
+ {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+ };
+ ResultType result = 0;
+ for (int i = 0; i < size; i++) {
+ result += popCountTable[a[i] ^ b[i]];
+ }
+ return result;
+ }
+};
+
+/**
+ * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
+ * bit count of A exclusive XOR'ed with B
+ */
+struct HammingLUT2
+{
+ typedef False is_kdtree_distance;
+ typedef False is_vector_space_distance;
+
+ typedef unsigned char ElementType;
+ typedef int ResultType;
+
+ /** this will count the bits in a ^ b
+ */
+ ResultType operator()(const unsigned char* a, const unsigned char* b, size_t size) const
+ {
+ static const uchar popCountTable[] =
+ {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+ };
+ ResultType result = 0;
+ for (size_t i = 0; i < size; i++) {
+ result += popCountTable[a[i] ^ b[i]];
+ }
+ return result;
+ }
+};
+
+/**
+ * Hamming distance functor (pop count between two binary vectors, i.e. xor them and count the number of bits set)
+ * That code was taken from brief.cpp in OpenCV
+ */
+template<class T>
+struct Hamming
+{
+ typedef False is_kdtree_distance;
+ typedef False is_vector_space_distance;
+
+
+ typedef T ElementType;
+ typedef int ResultType;
+
+ template<typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+ {
+ ResultType result = 0;
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+ {
+ uint32x4_t bits = vmovq_n_u32(0);
+ for (size_t i = 0; i < size; i += 16) {
+ uint8x16_t A_vec = vld1q_u8 (a + i);
+ uint8x16_t B_vec = vld1q_u8 (b + i);
+ uint8x16_t AxorB = veorq_u8 (A_vec, B_vec);
+ uint8x16_t bitsSet = vcntq_u8 (AxorB);
+ uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet);
+ uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8);
+ bits = vaddq_u32(bits, bitSet4);
+ }
+ uint64x2_t bitSet2 = vpaddlq_u32 (bits);
+ result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0);
+ result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2);
+ }
+#elif __GNUC__
+ {
+ //for portability just use unsigned long -- and use the __builtin_popcountll (see docs for __builtin_popcountll)
+ typedef unsigned long long pop_t;
+ const size_t modulo = size % sizeof(pop_t);
+ const pop_t* a2 = reinterpret_cast<const pop_t*> (a);
+ const pop_t* b2 = reinterpret_cast<const pop_t*> (b);
+ const pop_t* a2_end = a2 + (size / sizeof(pop_t));
+
+ for (; a2 != a2_end; ++a2, ++b2) result += __builtin_popcountll((*a2) ^ (*b2));
+
+ if (modulo) {
+ //in the case where size is not dividable by sizeof(size_t)
+ //need to mask off the bits at the end
+ pop_t a_final = 0, b_final = 0;
+ memcpy(&a_final, a2, modulo);
+ memcpy(&b_final, b2, modulo);
+ result += __builtin_popcountll(a_final ^ b_final);
+ }
+ }
+#else // NO NEON and NOT GNUC
+ typedef unsigned long long pop_t;
+ HammingLUT lut;
+ result = lut(reinterpret_cast<const unsigned char*> (a),
+ reinterpret_cast<const unsigned char*> (b), size * sizeof(pop_t));
+#endif
+ return result;
+ }
+};
+
+template<typename T>
+struct Hamming2
+{
+ typedef False is_kdtree_distance;
+ typedef False is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef int ResultType;
+
+ /** This is popcount_3() from:
+ * http://en.wikipedia.org/wiki/Hamming_weight */
+ unsigned int popcnt32(uint32_t n) const
+ {
+ n -= ((n >> 1) & 0x55555555);
+ n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
+ return (((n + (n >> 4))& 0xF0F0F0F)* 0x1010101) >> 24;
+ }
+
+#ifdef FLANN_PLATFORM_64_BIT
+ unsigned int popcnt64(uint64_t n) const
+ {
+ n -= ((n >> 1) & 0x5555555555555555);
+ n = (n & 0x3333333333333333) + ((n >> 2) & 0x3333333333333333);
+ return (((n + (n >> 4))& 0x0f0f0f0f0f0f0f0f)* 0x0101010101010101) >> 56;
+ }
+#endif
+
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+ {
+#ifdef FLANN_PLATFORM_64_BIT
+ const uint64_t* pa = reinterpret_cast<const uint64_t*>(a);
+ const uint64_t* pb = reinterpret_cast<const uint64_t*>(b);
+ ResultType result = 0;
+ size /= (sizeof(uint64_t)/sizeof(unsigned char));
+ for(size_t i = 0; i < size; ++i ) {
+ result += popcnt64(*pa ^ *pb);
+ ++pa;
+ ++pb;
+ }
+#else
+ const uint32_t* pa = reinterpret_cast<const uint32_t*>(a);
+ const uint32_t* pb = reinterpret_cast<const uint32_t*>(b);
+ ResultType result = 0;
+ size /= (sizeof(uint32_t)/sizeof(unsigned char));
+ for(size_t i = 0; i < size; ++i ) {
+ result += popcnt32(*pa ^ *pb);
+ ++pa;
+ ++pb;
+ }
+#endif
+ return result;
+ }
+};
+
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template<class T>
+struct HistIntersectionDistance
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ /**
+ * Compute the histogram intersection distance
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType min0, min1, min2, min3;
+ Iterator1 last = a + size;
+ Iterator1 lastgroup = last - 3;
+
+ /* Process 4 items with each loop for efficiency. */
+ while (a < lastgroup) {
+ min0 = (ResultType)(a[0] < b[0] ? a[0] : b[0]);
+ min1 = (ResultType)(a[1] < b[1] ? a[1] : b[1]);
+ min2 = (ResultType)(a[2] < b[2] ? a[2] : b[2]);
+ min3 = (ResultType)(a[3] < b[3] ? a[3] : b[3]);
+ result += min0 + min1 + min2 + min3;
+ a += 4;
+ b += 4;
+ if ((worst_dist>0)&&(result>worst_dist)) {
+ return result;
+ }
+ }
+ /* Process last 0-3 pixels. Not needed for standard vector lengths. */
+ while (a < last) {
+ min0 = (ResultType)(*a < *b ? *a : *b);
+ result += min0;
+ ++a;
+ ++b;
+ }
+ return result;
+ }
+
+ /**
+ * Partial distance, used by the kd-tree.
+ */
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ return a<b ? a : b;
+ }
+};
+
+
+
+template<class T>
+struct HellingerDistance
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ /**
+ * Compute the histogram intersection distance
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType /*worst_dist*/ = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType diff0, diff1, diff2, diff3;
+ Iterator1 last = a + size;
+ Iterator1 lastgroup = last - 3;
+
+ /* Process 4 items with each loop for efficiency. */
+ while (a < lastgroup) {
+ diff0 = sqrt(static_cast<ResultType>(a[0])) - sqrt(static_cast<ResultType>(b[0]));
+ diff1 = sqrt(static_cast<ResultType>(a[1])) - sqrt(static_cast<ResultType>(b[1]));
+ diff2 = sqrt(static_cast<ResultType>(a[2])) - sqrt(static_cast<ResultType>(b[2]));
+ diff3 = sqrt(static_cast<ResultType>(a[3])) - sqrt(static_cast<ResultType>(b[3]));
+ result += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3;
+ a += 4;
+ b += 4;
+ }
+ while (a < last) {
+ diff0 = sqrt(static_cast<ResultType>(*a++)) - sqrt(static_cast<ResultType>(*b++));
+ result += diff0 * diff0;
+ }
+ return result;
+ }
+
+ /**
+ * Partial distance, used by the kd-tree.
+ */
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ return sqrt(static_cast<ResultType>(a)) - sqrt(static_cast<ResultType>(b));
+ }
+};
+
+
+template<class T>
+struct ChiSquareDistance
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ /**
+ * Compute the chi-square distance
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+ {
+ ResultType result = ResultType();
+ ResultType sum, diff;
+ Iterator1 last = a + size;
+
+ while (a < last) {
+ sum = (ResultType)(*a + *b);
+ if (sum>0) {
+ diff = (ResultType)(*a - *b);
+ result += diff*diff/sum;
+ }
+ ++a;
+ ++b;
+
+ if ((worst_dist>0)&&(result>worst_dist)) {
+ return result;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Partial distance, used by the kd-tree.
+ */
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ ResultType result = ResultType();
+ ResultType sum, diff;
+
+ sum = (ResultType)(a+b);
+ if (sum>0) {
+ diff = (ResultType)(a-b);
+ result = diff*diff/sum;
+ }
+ return result;
+ }
+};
+
+
+template<class T>
+struct KL_Divergence
+{
+ typedef True is_kdtree_distance;
+ typedef True is_vector_space_distance;
+
+ typedef T ElementType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ /**
+ * Compute the Kullback–Leibler divergence
+ */
+ template <typename Iterator1, typename Iterator2>
+ ResultType operator()(Iterator1 a, Iterator2 b, size_t size, ResultType worst_dist = -1) const
+ {
+ ResultType result = ResultType();
+ Iterator1 last = a + size;
+
+ while (a < last) {
+ if (* a != 0) {
+ ResultType ratio = (ResultType)(*a / *b);
+ if (ratio>0) {
+ result += *a * log(ratio);
+ }
+ }
+ ++a;
+ ++b;
+
+ if ((worst_dist>0)&&(result>worst_dist)) {
+ return result;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Partial distance, used by the kd-tree.
+ */
+ template <typename U, typename V>
+ inline ResultType accum_dist(const U& a, const V& b, int) const
+ {
+ ResultType result = ResultType();
+ ResultType ratio = (ResultType)(a / b);
+ if (ratio>0) {
+ result = a * log(ratio);
+ }
+ return result;
+ }
+};
+
+
+
/*
 * A "zero iterator": behaves like an endless zero-filled array toward any
 * algorithm that uses array-style (STL) iterators.  Useful for computing
 * the distance between a feature and the origin without materializing a
 * zero-filled array, and it optimizes better than one.
 */
template <typename T>
struct ZeroIterator
{
    /** Dereference: always zero. */
    T operator*()
    {
        return 0;
    }

    /** Indexing: always zero, at any index. */
    T operator[](int)
    {
        return 0;
    }

    /** Pre-increment: position is meaningless, so this is a no-op. */
    const ZeroIterator<T>& operator++()
    {
        return *this;
    }

    /** Post-increment: no-op. */
    ZeroIterator<T> operator++(int)
    {
        return *this;
    }

    /** Advance by an offset: no-op. */
    ZeroIterator<T>& operator+=(int)
    {
        return *this;
    }
};
+
+
+/*
+ * Depending on processed distances, some of them are already squared (e.g. L2)
+ * and some are not (e.g.Hamming). In KMeans++ for instance we want to be sure
+ * we are working on ^2 distances, thus following templates to ensure that.
+ */
+template <typename Distance, typename ElementType>
+struct squareDistance
+{
+ typedef typename Distance::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return dist*dist; }
+};
+
+
+template <typename ElementType>
+struct squareDistance<L2_Simple<ElementType>, ElementType>
+{
+ typedef typename L2_Simple<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return dist; }
+};
+
+template <typename ElementType>
+struct squareDistance<L2<ElementType>, ElementType>
+{
+ typedef typename L2<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return dist; }
+};
+
+
+template <typename ElementType>
+struct squareDistance<MinkowskiDistance<ElementType>, ElementType>
+{
+ typedef typename MinkowskiDistance<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return dist; }
+};
+
+template <typename ElementType>
+struct squareDistance<HellingerDistance<ElementType>, ElementType>
+{
+ typedef typename HellingerDistance<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return dist; }
+};
+
+template <typename ElementType>
+struct squareDistance<ChiSquareDistance<ElementType>, ElementType>
+{
+ typedef typename ChiSquareDistance<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return dist; }
+};
+
+
+template <typename Distance>
+typename Distance::ResultType ensureSquareDistance( typename Distance::ResultType dist )
+{
+ typedef typename Distance::ElementType ElementType;
+
+ squareDistance<Distance, ElementType> dummy;
+ return dummy( dist );
+}
+
+
+/*
+ * ...and a template to ensure the user that he will process the normal distance,
+ * and not squared distance, without loosing processing time calling sqrt(ensureSquareDistance)
+ * that will result in doing actually sqrt(dist*dist) for L1 distance for instance.
+ */
+template <typename Distance, typename ElementType>
+struct simpleDistance
+{
+ typedef typename Distance::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return dist; }
+};
+
+
+template <typename ElementType>
+struct simpleDistance<L2_Simple<ElementType>, ElementType>
+{
+ typedef typename L2_Simple<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return sqrt(dist); }
+};
+
+template <typename ElementType>
+struct simpleDistance<L2<ElementType>, ElementType>
+{
+ typedef typename L2<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return sqrt(dist); }
+};
+
+
+template <typename ElementType>
+struct simpleDistance<MinkowskiDistance<ElementType>, ElementType>
+{
+ typedef typename MinkowskiDistance<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return sqrt(dist); }
+};
+
+template <typename ElementType>
+struct simpleDistance<HellingerDistance<ElementType>, ElementType>
+{
+ typedef typename HellingerDistance<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return sqrt(dist); }
+};
+
+template <typename ElementType>
+struct simpleDistance<ChiSquareDistance<ElementType>, ElementType>
+{
+ typedef typename ChiSquareDistance<ElementType>::ResultType ResultType;
+ ResultType operator()( ResultType dist ) { return sqrt(dist); }
+};
+
+
+template <typename Distance>
+typename Distance::ResultType ensureSimpleDistance( typename Distance::ResultType dist )
+{
+ typedef typename Distance::ElementType ElementType;
+
+ simpleDistance<Distance, ElementType> dummy;
+ return dummy( dist );
+}
+
+}
+
+#endif //OPENCV_FLANN_DIST_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h
new file mode 100644
index 00000000..33909818
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dummy.h
@@ -0,0 +1,45 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_DUMMY_H_
+#define OPENCV_FLANN_DUMMY_H_
+
+namespace cvflann
+{
+
+#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
+__declspec(dllexport)
+#endif
+void dummyfunc();
+
+}
+
+
+#endif /* OPENCV_FLANN_DUMMY_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h
new file mode 100644
index 00000000..d795b5d6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/dynamic_bitset.h
@@ -0,0 +1,159 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+/***********************************************************************
+ * Author: Vincent Rabaud
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_
+#define OPENCV_FLANN_DYNAMIC_BITSET_H_
+
+#ifndef FLANN_USE_BOOST
+# define FLANN_USE_BOOST 0
+#endif
+//#define FLANN_USE_BOOST 1
+#if FLANN_USE_BOOST
+#include <boost/dynamic_bitset.hpp>
+typedef boost::dynamic_bitset<> DynamicBitset;
+#else
+
+#include <limits.h>
+
+#include "dist.h"
+
+namespace cvflann {
+
+/** Class re-implementing boost::dynamic_bitset
+ * This helps not depending on boost, it also does not do the bound checks
+ * and has a way to reset a block for speed
+ */
+class DynamicBitset
+{
+public:
+ /** default constructor
+ */
+ DynamicBitset()
+ {
+ }
+
+ /** only constructor we use in our code
+ * @param sz the size of the bitset (in bits)
+ */
+ DynamicBitset(size_t sz)
+ {
+ resize(sz);
+ reset();
+ }
+
+ /** Sets all the bits to 0
+ */
+ void clear()
+ {
+ std::fill(bitset_.begin(), bitset_.end(), 0);
+ }
+
+ /** @brief checks if the bitset is empty
+ * @return true if the bitset is empty
+ */
+ bool empty() const
+ {
+ return bitset_.empty();
+ }
+
+ /** set all the bits to 0
+ */
+ void reset()
+ {
+ std::fill(bitset_.begin(), bitset_.end(), 0);
+ }
+
+ /** @brief set one bit to 0
+ * @param index
+ */
+ void reset(size_t index)
+ {
+ bitset_[index / cell_bit_size_] &= ~(size_t(1) << (index % cell_bit_size_));
+ }
+
+ /** @brief sets a specific bit to 0, and more bits too
+ * This function is useful when resetting a given set of bits so that the
+ * whole bitset ends up being 0: if that's the case, we don't care about setting
+ * other bits to 0
+ * @param index
+ */
+ void reset_block(size_t index)
+ {
+ bitset_[index / cell_bit_size_] = 0;
+ }
+
+ /** resize the bitset so that it contains at least sz bits
+ * @param sz
+ */
+ void resize(size_t sz)
+ {
+ size_ = sz;
+ bitset_.resize(sz / cell_bit_size_ + 1);
+ }
+
+ /** set a bit to true
+ * @param index the index of the bit to set to 1
+ */
+ void set(size_t index)
+ {
+ bitset_[index / cell_bit_size_] |= size_t(1) << (index % cell_bit_size_);
+ }
+
+ /** gives the number of contained bits
+ */
+ size_t size() const
+ {
+ return size_;
+ }
+
+ /** check if a bit is set
+ * @param index the index of the bit to check
+ * @return true if the bit is set
+ */
+ bool test(size_t index) const
+ {
+ return (bitset_[index / cell_bit_size_] & (size_t(1) << (index % cell_bit_size_))) != 0;
+ }
+
+private:
+ std::vector<size_t> bitset_;
+ size_t size_;
+ static const unsigned int cell_bit_size_ = CHAR_BIT * sizeof(size_t);
+};
+
+} // namespace cvflann
+
+#endif
+
+#endif // OPENCV_FLANN_DYNAMIC_BITSET_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp
new file mode 100644
index 00000000..d053488e
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann.hpp
@@ -0,0 +1,427 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef _OPENCV_FLANN_HPP_
+#define _OPENCV_FLANN_HPP_
+
+#ifdef __cplusplus
+
+#include "opencv2/core/types_c.h"
+#include "opencv2/core/core.hpp"
+#include "opencv2/flann/flann_base.hpp"
+#include "opencv2/flann/miniflann.hpp"
+
+namespace cvflann
+{
+ CV_EXPORTS flann_distance_t flann_distance_type();
+ FLANN_DEPRECATED CV_EXPORTS void set_distance_type(flann_distance_t distance_type, int order);
+}
+
+
+namespace cv
+{
+namespace flann
+{
+
+template <typename T> struct CvType {};
+template <> struct CvType<unsigned char> { static int type() { return CV_8U; } };
+template <> struct CvType<char> { static int type() { return CV_8S; } };
+template <> struct CvType<unsigned short> { static int type() { return CV_16U; } };
+template <> struct CvType<short> { static int type() { return CV_16S; } };
+template <> struct CvType<int> { static int type() { return CV_32S; } };
+template <> struct CvType<float> { static int type() { return CV_32F; } };
+template <> struct CvType<double> { static int type() { return CV_64F; } };
+
+
+// bring the flann parameters into this namespace
+using ::cvflann::get_param;
+using ::cvflann::print_params;
+
+// bring the flann distances into this namespace
+using ::cvflann::L2_Simple;
+using ::cvflann::L2;
+using ::cvflann::L1;
+using ::cvflann::MinkowskiDistance;
+using ::cvflann::MaxDistance;
+using ::cvflann::HammingLUT;
+using ::cvflann::Hamming;
+using ::cvflann::Hamming2;
+using ::cvflann::HistIntersectionDistance;
+using ::cvflann::HellingerDistance;
+using ::cvflann::ChiSquareDistance;
+using ::cvflann::KL_Divergence;
+
+
+
+template <typename Distance>
+class GenericIndex
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+ GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance());
+
+ ~GenericIndex();
+
+ void knnSearch(const vector<ElementType>& query, vector<int>& indices,
+ vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);
+ void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);
+
+ int radiusSearch(const vector<ElementType>& query, vector<int>& indices,
+ vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& params);
+ int radiusSearch(const Mat& query, Mat& indices, Mat& dists,
+ DistanceType radius, const ::cvflann::SearchParams& params);
+
+ void save(std::string filename) { nnIndex->save(filename); }
+
+ int veclen() const { return nnIndex->veclen(); }
+
+ int size() const { return nnIndex->size(); }
+
+ ::cvflann::IndexParams getParameters() { return nnIndex->getParameters(); }
+
+ FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters() { return nnIndex->getIndexParameters(); }
+
+private:
+ ::cvflann::Index<Distance>* nnIndex;
+};
+
+
+#define FLANN_DISTANCE_CHECK \
+ if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \
+ printf("[WARNING] You are using cv::flann::Index (or cv::flann::GenericIndex) and have also changed "\
+ "the distance using cvflann::set_distance_type. This is no longer working as expected "\
+ "(cv::flann::Index always uses L2). You should create the index templated on the distance, "\
+ "for example for L1 distance use: GenericIndex< L1<float> > \n"); \
+ }
+
+
+template <typename Distance>
+GenericIndex<Distance>::GenericIndex(const Mat& dataset, const ::cvflann::IndexParams& params, Distance distance)
+{
+ CV_Assert(dataset.type() == CvType<ElementType>::type());
+ CV_Assert(dataset.isContinuous());
+ ::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);
+
+ nnIndex = new ::cvflann::Index<Distance>(m_dataset, params, distance);
+
+ FLANN_DISTANCE_CHECK
+
+ nnIndex->buildIndex();
+}
+
+template <typename Distance>
+GenericIndex<Distance>::~GenericIndex()
+{
+ delete nnIndex;
+}
+
+template <typename Distance>
+void GenericIndex<Distance>::knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& searchParams)
+{
+ ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+ ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+ ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
+
+ FLANN_DISTANCE_CHECK
+
+ nnIndex->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
+}
+
+
+template <typename Distance>
+void GenericIndex<Distance>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams)
+{
+ CV_Assert(queries.type() == CvType<ElementType>::type());
+ CV_Assert(queries.isContinuous());
+ ::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);
+
+ CV_Assert(indices.type() == CV_32S);
+ CV_Assert(indices.isContinuous());
+ ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
+
+ CV_Assert(dists.type() == CvType<DistanceType>::type());
+ CV_Assert(dists.isContinuous());
+ ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
+
+ FLANN_DISTANCE_CHECK
+
+ nnIndex->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
+}
+
+template <typename Distance>
+int GenericIndex<Distance>::radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
+{
+ ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+ ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+ ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
+
+ FLANN_DISTANCE_CHECK
+
+ return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+}
+
+template <typename Distance>
+int GenericIndex<Distance>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
+{
+ CV_Assert(query.type() == CvType<ElementType>::type());
+ CV_Assert(query.isContinuous());
+ ::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);
+
+ CV_Assert(indices.type() == CV_32S);
+ CV_Assert(indices.isContinuous());
+ ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
+
+ CV_Assert(dists.type() == CvType<DistanceType>::type());
+ CV_Assert(dists.isContinuous());
+ ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
+
+ FLANN_DISTANCE_CHECK
+
+ return nnIndex->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+}
+
+/**
+ * @deprecated Use GenericIndex class instead
+ */
+template <typename T>
+class
+#ifndef _MSC_VER
+ FLANN_DEPRECATED
+#endif
+ Index_ {
+public:
+ typedef typename L2<T>::ElementType ElementType;
+ typedef typename L2<T>::ResultType DistanceType;
+
+ Index_(const Mat& features, const ::cvflann::IndexParams& params);
+
+ ~Index_();
+
+ void knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);
+ void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);
+
+ int radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& params);
+ int radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& params);
+
+ void save(std::string filename)
+ {
+ if (nnIndex_L1) nnIndex_L1->save(filename);
+ if (nnIndex_L2) nnIndex_L2->save(filename);
+ }
+
+ int veclen() const
+ {
+ if (nnIndex_L1) return nnIndex_L1->veclen();
+ if (nnIndex_L2) return nnIndex_L2->veclen();
+ }
+
+ int size() const
+ {
+ if (nnIndex_L1) return nnIndex_L1->size();
+ if (nnIndex_L2) return nnIndex_L2->size();
+ }
+
+ ::cvflann::IndexParams getParameters()
+ {
+ if (nnIndex_L1) return nnIndex_L1->getParameters();
+ if (nnIndex_L2) return nnIndex_L2->getParameters();
+
+ }
+
+ FLANN_DEPRECATED const ::cvflann::IndexParams* getIndexParameters()
+ {
+ if (nnIndex_L1) return nnIndex_L1->getIndexParameters();
+ if (nnIndex_L2) return nnIndex_L2->getIndexParameters();
+ }
+
+private:
+ // providing backwards compatibility for L2 and L1 distances (most common)
+ ::cvflann::Index< L2<ElementType> >* nnIndex_L2;
+ ::cvflann::Index< L1<ElementType> >* nnIndex_L1;
+};
+
+#ifdef _MSC_VER
+template <typename T>
+class FLANN_DEPRECATED Index_;
+#endif
+
+template <typename T>
+Index_<T>::Index_(const Mat& dataset, const ::cvflann::IndexParams& params)
+{
+ printf("[WARNING] The cv::flann::Index_<T> class is deperecated, use cv::flann::GenericIndex<Distance> instead\n");
+
+ CV_Assert(dataset.type() == CvType<ElementType>::type());
+ CV_Assert(dataset.isContinuous());
+ ::cvflann::Matrix<ElementType> m_dataset((ElementType*)dataset.ptr<ElementType>(0), dataset.rows, dataset.cols);
+
+ if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) {
+ nnIndex_L1 = NULL;
+ nnIndex_L2 = new ::cvflann::Index< L2<ElementType> >(m_dataset, params);
+ }
+ else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) {
+ nnIndex_L1 = new ::cvflann::Index< L1<ElementType> >(m_dataset, params);
+ nnIndex_L2 = NULL;
+ }
+ else {
+ printf("[ERROR] cv::flann::Index_<T> only provides backwards compatibility for the L1 and L2 distances. "
+ "For other distance types you must use cv::flann::GenericIndex<Distance>\n");
+ CV_Assert(0);
+ }
+ if (nnIndex_L1) nnIndex_L1->buildIndex();
+ if (nnIndex_L2) nnIndex_L2->buildIndex();
+}
+
+template <typename T>
+Index_<T>::~Index_()
+{
+ if (nnIndex_L1) delete nnIndex_L1;
+ if (nnIndex_L2) delete nnIndex_L2;
+}
+
+template <typename T>
+void Index_<T>::knnSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& searchParams)
+{
+ ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+ ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+ ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
+
+ if (nnIndex_L1) nnIndex_L1->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
+ if (nnIndex_L2) nnIndex_L2->knnSearch(m_query,m_indices,m_dists,knn,searchParams);
+}
+
+
+template <typename T>
+void Index_<T>::knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& searchParams)
+{
+ CV_Assert(queries.type() == CvType<ElementType>::type());
+ CV_Assert(queries.isContinuous());
+ ::cvflann::Matrix<ElementType> m_queries((ElementType*)queries.ptr<ElementType>(0), queries.rows, queries.cols);
+
+ CV_Assert(indices.type() == CV_32S);
+ CV_Assert(indices.isContinuous());
+ ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
+
+ CV_Assert(dists.type() == CvType<DistanceType>::type());
+ CV_Assert(dists.isContinuous());
+ ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
+
+ if (nnIndex_L1) nnIndex_L1->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
+ if (nnIndex_L2) nnIndex_L2->knnSearch(m_queries,m_indices,m_dists,knn, searchParams);
+}
+
+template <typename T>
+int Index_<T>::radiusSearch(const vector<ElementType>& query, vector<int>& indices, vector<DistanceType>& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
+{
+ ::cvflann::Matrix<ElementType> m_query((ElementType*)&query[0], 1, query.size());
+ ::cvflann::Matrix<int> m_indices(&indices[0], 1, indices.size());
+ ::cvflann::Matrix<DistanceType> m_dists(&dists[0], 1, dists.size());
+
+ if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+ if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+}
+
+template <typename T>
+int Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, DistanceType radius, const ::cvflann::SearchParams& searchParams)
+{
+ CV_Assert(query.type() == CvType<ElementType>::type());
+ CV_Assert(query.isContinuous());
+ ::cvflann::Matrix<ElementType> m_query((ElementType*)query.ptr<ElementType>(0), query.rows, query.cols);
+
+ CV_Assert(indices.type() == CV_32S);
+ CV_Assert(indices.isContinuous());
+ ::cvflann::Matrix<int> m_indices((int*)indices.ptr<int>(0), indices.rows, indices.cols);
+
+ CV_Assert(dists.type() == CvType<DistanceType>::type());
+ CV_Assert(dists.isContinuous());
+ ::cvflann::Matrix<DistanceType> m_dists((DistanceType*)dists.ptr<DistanceType>(0), dists.rows, dists.cols);
+
+ if (nnIndex_L1) return nnIndex_L1->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+ if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query,m_indices,m_dists,radius,searchParams);
+}
+
+
+template <typename Distance>
+int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params,
+ Distance d = Distance())
+{
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+ CV_Assert(features.type() == CvType<ElementType>::type());
+ CV_Assert(features.isContinuous());
+ ::cvflann::Matrix<ElementType> m_features((ElementType*)features.ptr<ElementType>(0), features.rows, features.cols);
+
+ CV_Assert(centers.type() == CvType<DistanceType>::type());
+ CV_Assert(centers.isContinuous());
+ ::cvflann::Matrix<DistanceType> m_centers((DistanceType*)centers.ptr<DistanceType>(0), centers.rows, centers.cols);
+
+ return ::cvflann::hierarchicalClustering<Distance>(m_features, m_centers, params, d);
+}
+
+
+template <typename ELEM_TYPE, typename DIST_TYPE>
+FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params)
+{
+ printf("[WARNING] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> is deprecated, use "
+ "cv::flann::hierarchicalClustering<Distance> instead\n");
+
+ if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L2 ) {
+ return hierarchicalClustering< L2<ELEM_TYPE> >(features, centers, params);
+ }
+ else if ( ::cvflann::flann_distance_type() == cvflann::FLANN_DIST_L1 ) {
+ return hierarchicalClustering< L1<ELEM_TYPE> >(features, centers, params);
+ }
+ else {
+ printf("[ERROR] cv::flann::hierarchicalClustering<ELEM_TYPE,DIST_TYPE> only provides backwards "
+ "compatibility for the L1 and L2 distances. "
+ "For other distance types you must use cv::flann::hierarchicalClustering<Distance>\n");
+ CV_Assert(0);
+ }
+}
+
+} } // namespace cv::flann
+
+#endif // __cplusplus
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp
new file mode 100644
index 00000000..bb5b1200
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/flann_base.hpp
@@ -0,0 +1,301 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_BASE_HPP_
+#define OPENCV_FLANN_BASE_HPP_
+
+#include <vector>
+#include <string>
+#include <cassert>
+#include <cstdio>
+
+#include "general.h"
+#include "matrix.h"
+#include "params.h"
+#include "saving.h"
+
+#include "all_indices.h"
+
+namespace cvflann
+{
+
+/**
+ * Sets the log level used for all flann functions
+ * @param level Verbosity level
+ */
+inline void log_verbosity(int level)
+{
+ if (level >= 0) {
+ Logger::setLevel(level);
+ }
+}
+
+/**
+ * (Deprecated) Index parameters for creating a saved index.
+ */
+struct SavedIndexParams : public IndexParams
+{
+ SavedIndexParams(std::string filename)
+ {
+ (* this)["algorithm"] = FLANN_INDEX_SAVED;
+ (*this)["filename"] = filename;
+ }
+};
+
+
+template<typename Distance>
+NNIndex<Distance>* load_saved_index(const Matrix<typename Distance::ElementType>& dataset, const std::string& filename, Distance distance)
+{
+ typedef typename Distance::ElementType ElementType;
+
+ FILE* fin = fopen(filename.c_str(), "rb");
+ if (fin == NULL) {
+ return NULL;
+ }
+ IndexHeader header = load_header(fin);
+ if (header.data_type != Datatype<ElementType>::type()) {
+ throw FLANNException("Datatype of saved index is different than of the one to be created.");
+ }
+ if ((size_t(header.rows) != dataset.rows)||(size_t(header.cols) != dataset.cols)) {
+ throw FLANNException("The index saved belongs to a different dataset");
+ }
+
+ IndexParams params;
+ params["algorithm"] = header.index_type;
+ NNIndex<Distance>* nnIndex = create_index_by_type<Distance>(dataset, params, distance);
+ nnIndex->loadIndex(fin);
+ fclose(fin);
+
+ return nnIndex;
+}
+
+
+template<typename Distance>
+class Index : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+ Index(const Matrix<ElementType>& features, const IndexParams& params, Distance distance = Distance() )
+ : index_params_(params)
+ {
+ flann_algorithm_t index_type = get_param<flann_algorithm_t>(params,"algorithm");
+ loaded_ = false;
+
+ if (index_type == FLANN_INDEX_SAVED) {
+ nnIndex_ = load_saved_index<Distance>(features, get_param<std::string>(params,"filename"), distance);
+ loaded_ = true;
+ }
+ else {
+ nnIndex_ = create_index_by_type<Distance>(features, params, distance);
+ }
+ }
+
+ ~Index()
+ {
+ delete nnIndex_;
+ }
+
+ /**
+ * implementation for algorithms of addable indexes after that.
+ */
+ void addIndex(const Matrix<ElementType>& wholeData, const Matrix<ElementType>& additionalData)
+ {
+ if (!loaded_) {
+ nnIndex_->addIndex(wholeData, additionalData);
+ }
+ }
+
+ /**
+ * Builds the index.
+ */
+ void buildIndex()
+ {
+ if (!loaded_) {
+ nnIndex_->buildIndex();
+ }
+ }
+
+ void save(std::string filename)
+ {
+ FILE* fout = fopen(filename.c_str(), "wb");
+ if (fout == NULL) {
+ throw FLANNException("Cannot open file");
+ }
+ save_header(fout, *nnIndex_);
+ saveIndex(fout);
+ fclose(fout);
+ }
+
+ /**
+ * \brief Saves the index to a stream
+ * \param stream The stream to save the index to
+ */
+ virtual void saveIndex(FILE* stream)
+ {
+ nnIndex_->saveIndex(stream);
+ }
+
+ /**
+ * \brief Loads the index from a stream
+ * \param stream The stream from which the index is loaded
+ */
+ virtual void loadIndex(FILE* stream)
+ {
+ nnIndex_->loadIndex(stream);
+ }
+
+ /**
+ * \returns number of features in this index.
+ */
+ size_t veclen() const
+ {
+ return nnIndex_->veclen();
+ }
+
+ /**
+ * \returns The dimensionality of the features in this index.
+ */
+ size_t size() const
+ {
+ return nnIndex_->size();
+ }
+
+ /**
+ * \returns The index type (kdtree, kmeans,...)
+ */
+ flann_algorithm_t getType() const
+ {
+ return nnIndex_->getType();
+ }
+
+ /**
+ * \returns The amount of memory (in bytes) used by the index.
+ */
+ virtual int usedMemory() const
+ {
+ return nnIndex_->usedMemory();
+ }
+
+
+ /**
+ * \returns The index parameters
+ */
+ IndexParams getParameters() const
+ {
+ return nnIndex_->getParameters();
+ }
+
+ /**
+ * \brief Perform k-nearest neighbor search
+ * \param[in] queries The query points for which to find the nearest neighbors
+ * \param[out] indices The indices of the nearest neighbors found
+ * \param[out] dists Distances to the nearest neighbors found
+ * \param[in] knn Number of nearest neighbors to return
+ * \param[in] params Search parameters
+ */
+ void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+ {
+ nnIndex_->knnSearch(queries, indices, dists, knn, params);
+ }
+
+ /**
+ * \brief Perform radius search
+ * \param[in] query The query point
+ * \param[out] indices The indices of the neighbors found within the given radius
+ * \param[out] dists The distances to the nearest neighbors found
+ * \param[in] radius The radius used for search
+ * \param[in] params Search parameters
+ * \returns Number of neighbors found
+ */
+ int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)
+ {
+ return nnIndex_->radiusSearch(query, indices, dists, radius, params);
+ }
+
+ /**
+ * \brief Method that searches for nearest-neighbours
+ */
+ void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+ {
+ nnIndex_->findNeighbors(result, vec, searchParams);
+ }
+
+ /**
+ * \brief Returns actual index
+ */
+ FLANN_DEPRECATED NNIndex<Distance>* getIndex()
+ {
+ return nnIndex_;
+ }
+
+ /**
+ * \brief Returns index parameters.
+ * \deprecated use getParameters() instead.
+ */
+ FLANN_DEPRECATED const IndexParams* getIndexParameters()
+ {
+ return &index_params_;
+ }
+
+private:
+ /** Pointer to actual index class */
+ NNIndex<Distance>* nnIndex_;
+ /** Indices if the index was loaded from a file */
+ bool loaded_;
+ /** Parameters passed to the index */
+ IndexParams index_params_;
+};
+
+/**
+ * Performs a hierarchical clustering of the points passed as argument and then takes a cut in
+ * the clustering tree to return a flat clustering.
+ * @param[in] points Points to be clustered
+ * @param centers The computed cluster centres. Matrix should be preallocated and centers.rows is the
+ * number of clusters requested.
+ * @param params Clustering parameters (The same as for cvflann::KMeansIndex)
+ * @param d Distance to be used for clustering (eg: cvflann::L2)
+ * @return number of clusters computed (can be different than clusters.rows and is the highest number
+ * of the form (branching-1)*K+1 smaller than clusters.rows).
+ */
+template <typename Distance>
+int hierarchicalClustering(const Matrix<typename Distance::ElementType>& points, Matrix<typename Distance::ResultType>& centers,
+ const KMeansIndexParams& params, Distance d = Distance())
+{
+ KMeansIndex<Distance> kmeans(points, params, d);
+ kmeans.buildIndex();
+
+ int clusterNum = kmeans.getClusterCenters(centers);
+ return clusterNum;
+}
+
+}
+#endif /* OPENCV_FLANN_BASE_HPP_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h
new file mode 100644
index 00000000..87e7e2f2
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/general.h
@@ -0,0 +1,52 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_GENERAL_H_
+#define OPENCV_FLANN_GENERAL_H_
+
+#include "defines.h"
+#include <stdexcept>
+#include <cassert>
+
+namespace cvflann
+{
+
+class FLANNException : public std::runtime_error
+{
+public:
+ FLANNException(const char* message) : std::runtime_error(message) { }
+
+ FLANNException(const std::string& message) : std::runtime_error(message) { }
+};
+
+}
+
+
+#endif /* OPENCV_FLANN_GENERAL_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h
new file mode 100644
index 00000000..fd8f3ae7
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/ground_truth.h
@@ -0,0 +1,94 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_GROUND_TRUTH_H_
+#define OPENCV_FLANN_GROUND_TRUTH_H_
+
+#include "dist.h"
+#include "matrix.h"
+
+
+namespace cvflann
+{
+
+template <typename Distance>
+void find_nearest(const Matrix<typename Distance::ElementType>& dataset, typename Distance::ElementType* query, int* matches, int nn,
+ int skip = 0, Distance distance = Distance())
+{
+ typedef typename Distance::ResultType DistanceType;
+ int n = nn + skip;
+
+ std::vector<int> match(n);
+ std::vector<DistanceType> dists(n);
+
+ dists[0] = distance(dataset[0], query, dataset.cols);
+ match[0] = 0;
+ int dcnt = 1;
+
+ for (size_t i=1; i<dataset.rows; ++i) {
+ DistanceType tmp = distance(dataset[i], query, dataset.cols);
+
+ if (dcnt<n) {
+ match[dcnt] = (int)i;
+ dists[dcnt++] = tmp;
+ }
+ else if (tmp < dists[dcnt-1]) {
+ dists[dcnt-1] = tmp;
+ match[dcnt-1] = (int)i;
+ }
+
+ int j = dcnt-1;
+ // bubble up
+ while (j>=1 && dists[j]<dists[j-1]) {
+ std::swap(dists[j],dists[j-1]);
+ std::swap(match[j],match[j-1]);
+ j--;
+ }
+ }
+
+ for (int i=0; i<nn; ++i) {
+ matches[i] = match[i+skip];
+ }
+}
+
+
+template <typename Distance>
+void compute_ground_truth(const Matrix<typename Distance::ElementType>& dataset, const Matrix<typename Distance::ElementType>& testset, Matrix<int>& matches,
+ int skip=0, Distance d = Distance())
+{
+ for (size_t i=0; i<testset.rows; ++i) {
+ find_nearest<Distance>(dataset, testset[i], matches[i], (int)matches.cols, skip, d);
+ }
+}
+
+
+}
+
+#endif //OPENCV_FLANN_GROUND_TRUTH_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h
new file mode 100644
index 00000000..ef3e9997
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hdf5.h
@@ -0,0 +1,231 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_HDF5_H_
+#define OPENCV_FLANN_HDF5_H_
+
+#include <hdf5.h>
+
+#include "matrix.h"
+
+
+namespace cvflann
+{
+
+namespace
+{
+
+template<typename T>
+hid_t get_hdf5_type()
+{
+ throw FLANNException("Unsupported type for IO operations");
+}
+
+template<>
+hid_t get_hdf5_type<char>() { return H5T_NATIVE_CHAR; }
+template<>
+hid_t get_hdf5_type<unsigned char>() { return H5T_NATIVE_UCHAR; }
+template<>
+hid_t get_hdf5_type<short int>() { return H5T_NATIVE_SHORT; }
+template<>
+hid_t get_hdf5_type<unsigned short int>() { return H5T_NATIVE_USHORT; }
+template<>
+hid_t get_hdf5_type<int>() { return H5T_NATIVE_INT; }
+template<>
+hid_t get_hdf5_type<unsigned int>() { return H5T_NATIVE_UINT; }
+template<>
+hid_t get_hdf5_type<long>() { return H5T_NATIVE_LONG; }
+template<>
+hid_t get_hdf5_type<unsigned long>() { return H5T_NATIVE_ULONG; }
+template<>
+hid_t get_hdf5_type<float>() { return H5T_NATIVE_FLOAT; }
+template<>
+hid_t get_hdf5_type<double>() { return H5T_NATIVE_DOUBLE; }
+}
+
+
+#define CHECK_ERROR(x,y) if ((x)<0) throw FLANNException((y));
+
+template<typename T>
+void save_to_file(const cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name)
+{
+
+#if H5Eset_auto_vers == 2
+ H5Eset_auto( H5E_DEFAULT, NULL, NULL );
+#else
+ H5Eset_auto( NULL, NULL );
+#endif
+
+ herr_t status;
+ hid_t file_id;
+ file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
+ if (file_id < 0) {
+ file_id = H5Fcreate(filename.c_str(), H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ CHECK_ERROR(file_id,"Error creating hdf5 file.");
+
+ hsize_t dimsf[2]; // dataset dimensions
+ dimsf[0] = dataset.rows;
+ dimsf[1] = dataset.cols;
+
+ hid_t space_id = H5Screate_simple(2, dimsf, NULL);
+ hid_t memspace_id = H5Screate_simple(2, dimsf, NULL);
+
+ hid_t dataset_id;
+#if H5Dcreate_vers == 2
+ dataset_id = H5Dcreate2(file_id, name.c_str(), get_hdf5_type<T>(), space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+#else
+ dataset_id = H5Dcreate(file_id, name.c_str(), get_hdf5_type<T>(), space_id, H5P_DEFAULT);
+#endif
+
+ if (dataset_id<0) {
+#if H5Dopen_vers == 2
+ dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);
+#else
+ dataset_id = H5Dopen(file_id, name.c_str());
+#endif
+ }
+ CHECK_ERROR(dataset_id,"Error creating or opening dataset in file.");
+
+ status = H5Dwrite(dataset_id, get_hdf5_type<T>(), memspace_id, space_id, H5P_DEFAULT, dataset.data );
+ CHECK_ERROR(status, "Error writing to dataset");
+
+ H5Sclose(memspace_id);
+ H5Sclose(space_id);
+ H5Dclose(dataset_id);
+ H5Fclose(file_id);
+
+}
+
+
+template<typename T>
+void load_from_file(cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name)
+{
+ herr_t status;
+ hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_ERROR(file_id,"Error opening hdf5 file.");
+
+ hid_t dataset_id;
+#if H5Dopen_vers == 2
+ dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);
+#else
+ dataset_id = H5Dopen(file_id, name.c_str());
+#endif
+ CHECK_ERROR(dataset_id,"Error opening dataset in file.");
+
+ hid_t space_id = H5Dget_space(dataset_id);
+
+ hsize_t dims_out[2];
+ H5Sget_simple_extent_dims(space_id, dims_out, NULL);
+
+ dataset = cvflann::Matrix<T>(new T[dims_out[0]*dims_out[1]], dims_out[0], dims_out[1]);
+
+ status = H5Dread(dataset_id, get_hdf5_type<T>(), H5S_ALL, H5S_ALL, H5P_DEFAULT, dataset[0]);
+ CHECK_ERROR(status, "Error reading dataset");
+
+ H5Sclose(space_id);
+ H5Dclose(dataset_id);
+ H5Fclose(file_id);
+}
+
+
+#ifdef HAVE_MPI
+
+namespace mpi
+{
+/**
+ * Loads a the hyperslice corresponding to this processor from a hdf5 file.
+ * @param flann_dataset Dataset where the data is loaded
+ * @param filename HDF5 file name
+ * @param name Name of dataset inside file
+ */
+template<typename T>
+void load_from_file(cvflann::Matrix<T>& dataset, const std::string& filename, const std::string& name)
+{
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ int mpi_size, mpi_rank;
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ herr_t status;
+
+ hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist_id, comm, info);
+ hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id);
+ CHECK_ERROR(file_id,"Error opening hdf5 file.");
+ H5Pclose(plist_id);
+ hid_t dataset_id;
+#if H5Dopen_vers == 2
+ dataset_id = H5Dopen2(file_id, name.c_str(), H5P_DEFAULT);
+#else
+ dataset_id = H5Dopen(file_id, name.c_str());
+#endif
+ CHECK_ERROR(dataset_id,"Error opening dataset in file.");
+
+ hid_t space_id = H5Dget_space(dataset_id);
+ hsize_t dims[2];
+ H5Sget_simple_extent_dims(space_id, dims, NULL);
+
+ hsize_t count[2];
+ hsize_t offset[2];
+
+ hsize_t item_cnt = dims[0]/mpi_size+(dims[0]%mpi_size==0 ? 0 : 1);
+ hsize_t cnt = (mpi_rank<mpi_size-1 ? item_cnt : dims[0]-item_cnt*(mpi_size-1));
+
+ count[0] = cnt;
+ count[1] = dims[1];
+ offset[0] = mpi_rank*item_cnt;
+ offset[1] = 0;
+
+ hid_t memspace_id = H5Screate_simple(2,count,NULL);
+
+ H5Sselect_hyperslab(space_id, H5S_SELECT_SET, offset, NULL, count, NULL);
+
+ dataset.rows = count[0];
+ dataset.cols = count[1];
+ dataset.data = new T[dataset.rows*dataset.cols];
+
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+ status = H5Dread(dataset_id, get_hdf5_type<T>(), memspace_id, space_id, plist_id, dataset.data);
+ CHECK_ERROR(status, "Error reading dataset");
+
+ H5Pclose(plist_id);
+ H5Sclose(space_id);
+ H5Sclose(memspace_id);
+ H5Dclose(dataset_id);
+ H5Fclose(file_id);
+}
+}
+#endif // HAVE_MPI
+} // namespace cvflann::mpi
+
+#endif /* OPENCV_FLANN_HDF5_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h
new file mode 100644
index 00000000..92a6ea61
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/heap.h
@@ -0,0 +1,165 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_HEAP_H_
+#define OPENCV_FLANN_HEAP_H_
+
+#include <algorithm>
+#include <vector>
+
+namespace cvflann
+{
+
+/**
+ * Priority Queue Implementation
+ *
+ * The priority queue is implemented with a heap. A heap is a complete
+ * (full) binary tree in which each parent is less than both of its
+ * children, but the order of the children is unspecified.
+ */
+template <typename T>
+class Heap
+{
+
+ /**
+ * Storage array for the heap.
+ * Type T must be comparable.
+ */
+ std::vector<T> heap;
+ int length;
+
+ /**
+ * Number of element in the heap
+ */
+ int count;
+
+
+
+public:
+ /**
+ * Constructor.
+ *
+ * Params:
+ * sz = heap size
+ */
+
+ Heap(int sz)
+ {
+ length = sz;
+ heap.reserve(length);
+ count = 0;
+ }
+
+ /**
+ *
+ * Returns: heap size
+ */
+ int size()
+ {
+ return count;
+ }
+
+ /**
+ * Tests if the heap is empty
+ *
+ * Returns: true is heap empty, false otherwise
+ */
+ bool empty()
+ {
+ return size()==0;
+ }
+
+ /**
+ * Clears the heap.
+ */
+ void clear()
+ {
+ heap.clear();
+ count = 0;
+ }
+
+ struct CompareT
+ {
+ bool operator()(const T& t_1, const T& t_2) const
+ {
+ return t_2 < t_1;
+ }
+ };
+
+ /**
+ * Insert a new element in the heap.
+ *
+ * We select the next empty leaf node, and then keep moving any larger
+ * parents down until the right location is found to store this element.
+ *
+ * Params:
+ * value = the new element to be inserted in the heap
+ */
+ void insert(T value)
+ {
+ /* If heap is full, then return without adding this element. */
+ if (count == length) {
+ return;
+ }
+
+ heap.push_back(value);
+ static CompareT compareT;
+ std::push_heap(heap.begin(), heap.end(), compareT);
+ ++count;
+ }
+
+
+
+ /**
+ * Returns the node of minimum value from the heap (top of the heap).
+ *
+ * Params:
+ * value = out parameter used to return the min element
+ * Returns: false if heap empty
+ */
+ bool popMin(T& value)
+ {
+ if (count == 0) {
+ return false;
+ }
+
+ value = heap[0];
+ static CompareT compareT;
+ std::pop_heap(heap.begin(), heap.end(), compareT);
+ heap.pop_back();
+ --count;
+
+ return true; /* Return old last node. */
+ }
+};
+
+}
+
+#endif //OPENCV_FLANN_HEAP_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h
new file mode 100644
index 00000000..59423ae0
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/hierarchical_clustering_index.h
@@ -0,0 +1,776 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_
+#define OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_
+
+#include <algorithm>
+#include <string>
+#include <map>
+#include <cassert>
+#include <limits>
+#include <cmath>
+
+#include "general.h"
+#include "nn_index.h"
+#include "dist.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+
+
+namespace cvflann
+{
+
+struct HierarchicalClusteringIndexParams : public IndexParams
+{
+ HierarchicalClusteringIndexParams(int branching = 32,
+ flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM,
+ int trees = 4, int leaf_size = 100)
+ {
+ (*this)["algorithm"] = FLANN_INDEX_HIERARCHICAL;
+ // The branching factor used in the hierarchical clustering
+ (*this)["branching"] = branching;
+ // Algorithm used for picking the initial cluster centers
+ (*this)["centers_init"] = centers_init;
+ // number of parallel trees to build
+ (*this)["trees"] = trees;
+ // maximum leaf size
+ (*this)["leaf_size"] = leaf_size;
+ }
+};
+
+
+/**
+ * Hierarchical index
+ *
+ * Contains a tree constructed through a hierarchical clustering
+ * and other information for indexing a set of points for nearest-neighbour matching.
+ */
+template <typename Distance>
+class HierarchicalClusteringIndex : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+private:
+
+
+ typedef void (HierarchicalClusteringIndex::* centersAlgFunction)(int, int*, int, int*, int&);
+
+ /**
+ * The function used for choosing the cluster centers.
+ */
+ centersAlgFunction chooseCenters;
+
+
+
+ /**
+ * Chooses the initial centers in the k-means clustering in a random manner.
+ *
+ * Params:
+ * k = number of centers
+ * vecs = the dataset of points
+ * indices = indices in the dataset
+ * indices_length = length of indices vector
+ *
+ */
+ void chooseCentersRandom(int k, int* dsindices, int indices_length, int* centers, int& centers_length)
+ {
+ UniqueRandom r(indices_length);
+
+ int index;
+ for (index=0; index<k; ++index) {
+ bool duplicate = true;
+ int rnd;
+ while (duplicate) {
+ duplicate = false;
+ rnd = r.next();
+ if (rnd<0) {
+ centers_length = index;
+ return;
+ }
+
+ centers[index] = dsindices[rnd];
+
+ for (int j=0; j<index; ++j) {
+ DistanceType sq = distance(dataset[centers[index]], dataset[centers[j]], dataset.cols);
+ if (sq<1e-16) {
+ duplicate = true;
+ }
+ }
+ }
+ }
+
+ centers_length = index;
+ }
+
+
+ /**
+ * Chooses the initial centers in the k-means using Gonzales' algorithm
+ * so that the centers are spaced apart from each other.
+ *
+ * Params:
+ * k = number of centers
+ * vecs = the dataset of points
+ * indices = indices in the dataset
+ * Returns:
+ */
+ void chooseCentersGonzales(int k, int* dsindices, int indices_length, int* centers, int& centers_length)
+ {
+ int n = indices_length;
+
+ int rnd = rand_int(n);
+ assert(rnd >=0 && rnd < n);
+
+ centers[0] = dsindices[rnd];
+
+ int index;
+ for (index=1; index<k; ++index) {
+
+ int best_index = -1;
+ DistanceType best_val = 0;
+ for (int j=0; j<n; ++j) {
+ DistanceType dist = distance(dataset[centers[0]],dataset[dsindices[j]],dataset.cols);
+ for (int i=1; i<index; ++i) {
+ DistanceType tmp_dist = distance(dataset[centers[i]],dataset[dsindices[j]],dataset.cols);
+ if (tmp_dist<dist) {
+ dist = tmp_dist;
+ }
+ }
+ if (dist>best_val) {
+ best_val = dist;
+ best_index = j;
+ }
+ }
+ if (best_index!=-1) {
+ centers[index] = dsindices[best_index];
+ }
+ else {
+ break;
+ }
+ }
+ centers_length = index;
+ }
+
+
+ /**
+ * Chooses the initial centers in the k-means using the algorithm
+ * proposed in the KMeans++ paper:
+ * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding
+ *
+ * Implementation of this function was converted from the one provided in Arthur's code.
+ *
+ * Params:
+ * k = number of centers
+ * vecs = the dataset of points
+ * indices = indices in the dataset
+ * Returns:
+ */
+ void chooseCentersKMeanspp(int k, int* dsindices, int indices_length, int* centers, int& centers_length)
+ {
+ int n = indices_length;
+
+ double currentPot = 0;
+ DistanceType* closestDistSq = new DistanceType[n];
+
+ // Choose one random center and set the closestDistSq values
+ int index = rand_int(n);
+ assert(index >=0 && index < n);
+ centers[0] = dsindices[index];
+
+ // Computing distance^2 will have the advantage of even higher probability further to pick new centers
+ // far from previous centers (and this complies to "k-means++: the advantages of careful seeding" article)
+ for (int i = 0; i < n; i++) {
+ closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols);
+ closestDistSq[i] = ensureSquareDistance<Distance>( closestDistSq[i] );
+ currentPot += closestDistSq[i];
+ }
+
+
+ const int numLocalTries = 1;
+
+ // Choose each center
+ int centerCount;
+ for (centerCount = 1; centerCount < k; centerCount++) {
+
+ // Repeat several trials
+ double bestNewPot = -1;
+ int bestNewIndex = 0;
+ for (int localTrial = 0; localTrial < numLocalTries; localTrial++) {
+
+ // Choose our center - have to be slightly careful to return a valid answer even accounting
+ // for possible rounding errors
+ double randVal = rand_double(currentPot);
+ for (index = 0; index < n-1; index++) {
+ if (randVal <= closestDistSq[index]) break;
+ else randVal -= closestDistSq[index];
+ }
+
+ // Compute the new potential
+ double newPot = 0;
+ for (int i = 0; i < n; i++) {
+ DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols);
+ newPot += std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );
+ }
+
+ // Store the best result
+ if ((bestNewPot < 0)||(newPot < bestNewPot)) {
+ bestNewPot = newPot;
+ bestNewIndex = index;
+ }
+ }
+
+ // Add the appropriate center
+ centers[centerCount] = dsindices[bestNewIndex];
+ currentPot = bestNewPot;
+ for (int i = 0; i < n; i++) {
+ DistanceType dist = distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols);
+ closestDistSq[i] = std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );
+ }
+ }
+
+ centers_length = centerCount;
+
+ delete[] closestDistSq;
+ }
+
+
+public:
+
+
+ /**
+ * Index constructor
+ *
+ * Params:
+ * inputData = dataset with the input features
+ * params = parameters passed to the hierarchical k-means algorithm
+ */
+ HierarchicalClusteringIndex(const Matrix<ElementType>& inputData, const IndexParams& index_params = HierarchicalClusteringIndexParams(),
+ Distance d = Distance())
+ : dataset(inputData), params(index_params), root(NULL), indices(NULL), distance(d)
+ {
+ memoryCounter = 0;
+
+ size_ = dataset.rows;
+ veclen_ = dataset.cols;
+
+ branching_ = get_param(params,"branching",32);
+ centers_init_ = get_param(params,"centers_init", FLANN_CENTERS_RANDOM);
+ trees_ = get_param(params,"trees",4);
+ leaf_size_ = get_param(params,"leaf_size",100);
+
+ if (centers_init_==FLANN_CENTERS_RANDOM) {
+ chooseCenters = &HierarchicalClusteringIndex::chooseCentersRandom;
+ }
+ else if (centers_init_==FLANN_CENTERS_GONZALES) {
+ chooseCenters = &HierarchicalClusteringIndex::chooseCentersGonzales;
+ }
+ else if (centers_init_==FLANN_CENTERS_KMEANSPP) {
+ chooseCenters = &HierarchicalClusteringIndex::chooseCentersKMeanspp;
+ }
+ else {
+ throw FLANNException("Unknown algorithm for choosing initial centers.");
+ }
+
+ trees_ = get_param(params,"trees",4);
+ root = new NodePtr[trees_];
+ indices = new int*[trees_];
+
+ for (int i=0; i<trees_; ++i) {
+ root[i] = NULL;
+ indices[i] = NULL;
+ }
+ }
+
+ HierarchicalClusteringIndex(const HierarchicalClusteringIndex&);
+ HierarchicalClusteringIndex& operator=(const HierarchicalClusteringIndex&);
+
+ /**
+ * Index destructor.
+ *
+ * Release the memory used by the index.
+ */
+ virtual ~HierarchicalClusteringIndex()
+ {
+ free_elements();
+
+ if (root!=NULL) {
+ delete[] root;
+ }
+
+ if (indices!=NULL) {
+ delete[] indices;
+ }
+ }
+
+
+ /**
+ * Release the inner elements of indices[]
+ */
+ void free_elements()
+ {
+ if (indices!=NULL) {
+ for(int i=0; i<trees_; ++i) {
+ if (indices[i]!=NULL) {
+ delete[] indices[i];
+ indices[i] = NULL;
+ }
+ }
+ }
+ }
+
+
+ /**
+ * Returns size of index.
+ */
+ size_t size() const
+ {
+ return size_;
+ }
+
+ /**
+ * Returns the length of an index feature.
+ */
+ size_t veclen() const
+ {
+ return veclen_;
+ }
+
+
+ /**
+ * Computes the inde memory usage
+ * Returns: memory used by the index
+ */
+ int usedMemory() const
+ {
+ return pool.usedMemory+pool.wastedMemory+memoryCounter;
+ }
+
+
+ /**
+ * Dummy implementation for other algorithms of addable indexes after that.
+ */
+ void addIndex(const Matrix<ElementType>& /*wholeData*/, const Matrix<ElementType>& /*additionalData*/)
+ {
+ }
+
+ /**
+ * Builds the index
+ */
+ void buildIndex()
+ {
+ if (branching_<2) {
+ throw FLANNException("Branching factor must be at least 2");
+ }
+
+ free_elements();
+
+ for (int i=0; i<trees_; ++i) {
+ indices[i] = new int[size_];
+ for (size_t j=0; j<size_; ++j) {
+ indices[i][j] = (int)j;
+ }
+ root[i] = pool.allocate<Node>();
+ computeClustering(root[i], indices[i], (int)size_, branching_,0);
+ }
+ }
+
+
+ flann_algorithm_t getType() const
+ {
+ return FLANN_INDEX_HIERARCHICAL;
+ }
+
+
+ void saveIndex(FILE* stream)
+ {
+ save_value(stream, branching_);
+ save_value(stream, trees_);
+ save_value(stream, centers_init_);
+ save_value(stream, leaf_size_);
+ save_value(stream, memoryCounter);
+ for (int i=0; i<trees_; ++i) {
+ save_value(stream, *indices[i], size_);
+ save_tree(stream, root[i], i);
+ }
+
+ }
+
+
+ void loadIndex(FILE* stream)
+ {
+ free_elements();
+
+ if (root!=NULL) {
+ delete[] root;
+ }
+
+ if (indices!=NULL) {
+ delete[] indices;
+ }
+
+ load_value(stream, branching_);
+ load_value(stream, trees_);
+ load_value(stream, centers_init_);
+ load_value(stream, leaf_size_);
+ load_value(stream, memoryCounter);
+
+ indices = new int*[trees_];
+ root = new NodePtr[trees_];
+ for (int i=0; i<trees_; ++i) {
+ indices[i] = new int[size_];
+ load_value(stream, *indices[i], size_);
+ load_tree(stream, root[i], i);
+ }
+
+ params["algorithm"] = getType();
+ params["branching"] = branching_;
+ params["trees"] = trees_;
+ params["centers_init"] = centers_init_;
+ params["leaf_size"] = leaf_size_;
+ }
+
+
+ /**
+ * Find set of nearest neighbors to vec. Their indices are stored inside
+ * the result object.
+ *
+ * Params:
+ * result = the result object in which the indices of the nearest-neighbors are stored
+ * vec = the vector for which to search the nearest neighbors
+ * searchParams = parameters that influence the search algorithm (checks)
+ */
+ void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+ {
+
+ int maxChecks = get_param(searchParams,"checks",32);
+
+ // Priority queue storing intermediate branches in the best-bin-first search
+ Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
+
+ std::vector<bool> checked(size_,false);
+ int checks = 0;
+ for (int i=0; i<trees_; ++i) {
+ findNN(root[i], result, vec, checks, maxChecks, heap, checked);
+ }
+
+ BranchSt branch;
+ while (heap->popMin(branch) && (checks<maxChecks || !result.full())) {
+ NodePtr node = branch.node;
+ findNN(node, result, vec, checks, maxChecks, heap, checked);
+ }
+ assert(result.full());
+
+ delete heap;
+
+ }
+
+ IndexParams getParameters() const
+ {
+ return params;
+ }
+
+
+private:
+
+ /**
+ * Struture representing a node in the hierarchical k-means tree.
+ */
+ struct Node
+ {
+ /**
+ * The cluster center index
+ */
+ int pivot;
+ /**
+ * The cluster size (number of points in the cluster)
+ */
+ int size;
+ /**
+ * Child nodes (only for non-terminal nodes)
+ */
+ Node** childs;
+ /**
+ * Node points (only for terminal nodes)
+ */
+ int* indices;
+ /**
+ * Level
+ */
+ int level;
+ };
+ typedef Node* NodePtr;
+
+
+
+ /**
+ * Alias definition for a nicer syntax.
+ */
+ typedef BranchStruct<NodePtr, DistanceType> BranchSt;
+
+
+
+ void save_tree(FILE* stream, NodePtr node, int num)
+ {
+ save_value(stream, *node);
+ if (node->childs==NULL) {
+ int indices_offset = (int)(node->indices - indices[num]);
+ save_value(stream, indices_offset);
+ }
+ else {
+ for(int i=0; i<branching_; ++i) {
+ save_tree(stream, node->childs[i], num);
+ }
+ }
+ }
+
+
    // Mirror of save_tree(): allocates each Node from the pool, restores it from
    // the stream, and rebuilds leaf index pointers from the stored offset.
    void load_tree(FILE* stream, NodePtr& node, int num)
    {
        node = pool.allocate<Node>();
        load_value(stream, *node);
        if (node->childs==NULL) {
            int indices_offset;
            load_value(stream, indices_offset);
            node->indices = indices[num] + indices_offset;
        }
        else {
            node->childs = pool.allocate<NodePtr>(branching_);
            for(int i=0; i<branching_; ++i) {
                load_tree(stream, node->childs[i], num);
            }
        }
    }
+
+
+
+
    // For every point in dsindices, finds the nearest center by linear scan,
    // writes the winning center's index into labels[i], and accumulates the
    // winning distances into cost (total clustering cost).
    void computeLabels(int* dsindices, int indices_length, int* centers, int centers_length, int* labels, DistanceType& cost)
    {
        cost = 0;
        for (int i=0; i<indices_length; ++i) {
            ElementType* point = dataset[dsindices[i]];
            DistanceType dist = distance(point, dataset[centers[0]], veclen_);
            labels[i] = 0;
            for (int j=1; j<centers_length; ++j) {
                DistanceType new_dist = distance(point, dataset[centers[j]], veclen_);
                if (dist>new_dist) {
                    labels[i] = j;
                    dist = new_dist;
                }
            }
            cost += dist;
        }
    }
+
+ /**
+ * The method responsible with actually doing the recursive hierarchical
+ * clustering
+ *
+ * Params:
+ * node = the node to cluster
+ * indices = indices of the points belonging to the current node
+ * branching = the branching factor to use in the clustering
+ *
+ * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)
+ */
    void computeClustering(NodePtr node, int* dsindices, int indices_length, int branching, int level)
    {
        node->size = indices_length;
        node->level = level;

        // Small cluster: make a leaf holding the (sorted) point indices.
        if (indices_length < leaf_size_) {    // leaf node
            node->indices = dsindices;
            std::sort(node->indices,node->indices+indices_length);
            node->childs = NULL;
            return;
        }

        std::vector<int> centers(branching);
        std::vector<int> labels(indices_length);

        // Center selection strategy is a member-function pointer (random /
        // gonzales / kmeans++, chosen at construction).
        int centers_length;
        (this->*chooseCenters)(branching, dsindices, indices_length, &centers[0], centers_length);

        // Fewer distinct centers than requested: cannot split further, make a leaf.
        if (centers_length<branching) {
            node->indices = dsindices;
            std::sort(node->indices,node->indices+indices_length);
            node->childs = NULL;
            return;
        }

        // assign points to clusters
        DistanceType cost;
        computeLabels(dsindices, indices_length, &centers[0], centers_length, &labels[0], cost);

        node->childs = pool.allocate<NodePtr>(branching);
        int start = 0;
        int end = start;
        for (int i=0; i<branching; ++i) {
            // Move the points labeled i to the front of the remaining range,
            // then recurse on that contiguous sub-range [start, end).
            for (int j=0; j<indices_length; ++j) {
                if (labels[j]==i) {
                    std::swap(dsindices[j],dsindices[end]);
                    std::swap(labels[j],labels[end]);
                    end++;
                }
            }

            node->childs[i] = pool.allocate<Node>();
            node->childs[i]->pivot = centers[i];
            node->childs[i]->indices = NULL;
            computeClustering(node->childs[i],dsindices+start, end-start, branching, level+1);
            start=end;
        }
    }
+
+
+
+ /**
+ * Performs one descent in the hierarchical k-means tree. The branches not
+ * visited are stored in a priority queue.
+ *
+ * Params:
+ * node = node to explore
+ * result = container for the k-nearest neighbors found
+ * vec = query points
+ * checks = how many points in the dataset have been checked so far
+ * maxChecks = maximum dataset points to checks
+ */
+
+
+ void findNN(NodePtr node, ResultSet<DistanceType>& result, const ElementType* vec, int& checks, int maxChecks,
+ Heap<BranchSt>* heap, std::vector<bool>& checked)
+ {
+ if (node->childs==NULL) {
+ if (checks>=maxChecks) {
+ if (result.full()) return;
+ }
+ for (int i=0; i<node->size; ++i) {
+ int index = node->indices[i];
+ if (!checked[index]) {
+ DistanceType dist = distance(dataset[index], vec, veclen_);
+ result.addPoint(dist, index);
+ checked[index] = true;
+ ++checks;
+ }
+ }
+ }
+ else {
+ DistanceType* domain_distances = new DistanceType[branching_];
+ int best_index = 0;
+ domain_distances[best_index] = distance(vec, dataset[node->childs[best_index]->pivot], veclen_);
+ for (int i=1; i<branching_; ++i) {
+ domain_distances[i] = distance(vec, dataset[node->childs[i]->pivot], veclen_);
+ if (domain_distances[i]<domain_distances[best_index]) {
+ best_index = i;
+ }
+ }
+ for (int i=0; i<branching_; ++i) {
+ if (i!=best_index) {
+ heap->insert(BranchSt(node->childs[i],domain_distances[i]));
+ }
+ }
+ delete[] domain_distances;
+ findNN(node->childs[best_index],result,vec, checks, maxChecks, heap, checked);
+ }
+ }
+
+private:
+
+
+ /**
+ * The dataset used by this index
+ */
+ const Matrix<ElementType> dataset;
+
+ /**
+ * Parameters used by this index
+ */
+ IndexParams params;
+
+
+ /**
+ * Number of features in the dataset.
+ */
+ size_t size_;
+
+ /**
+ * Length of each feature.
+ */
+ size_t veclen_;
+
+ /**
+ * The root node in the tree.
+ */
+ NodePtr* root;
+
+ /**
+ * Array of indices to vectors in the dataset.
+ */
+ int** indices;
+
+
+ /**
+ * The distance
+ */
+ Distance distance;
+
+ /**
+ * Pooled memory allocator.
+ *
+ * Using a pooled memory allocator is more efficient
+ * than allocating memory directly when there is a large
+ * number small of memory allocations.
+ */
+ PooledAllocator pool;
+
+ /**
+ * Memory occupied by the index.
+ */
+ int memoryCounter;
+
+ /** index parameters */
+ int branching_;
+ int trees_;
+ flann_centers_init_t centers_init_;
+ int leaf_size_;
+
+
+};
+
+}
+
+#endif /* OPENCV_FLANN_HIERARCHICAL_CLUSTERING_INDEX_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h
new file mode 100644
index 00000000..d7640040
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/index_testing.h
@@ -0,0 +1,318 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_INDEX_TESTING_H_
+#define OPENCV_FLANN_INDEX_TESTING_H_
+
+#include <cstring>
+#include <cassert>
+#include <cmath>
+
+#include "matrix.h"
+#include "nn_index.h"
+#include "result_set.h"
+#include "logger.h"
+#include "timer.h"
+
+
+namespace cvflann
+{
+
+inline int countCorrectMatches(int* neighbors, int* groundTruth, int n)
+{
+ int count = 0;
+ for (int i=0; i<n; ++i) {
+ for (int k=0; k<n; ++k) {
+ if (neighbors[i]==groundTruth[k]) {
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
+
+
// Sums, over the n found neighbors, the ratio dist(found, target)/dist(true, target)
// ("raport" = ratio; misspelled name kept for API compatibility). A pair of
// zero distances (exact hit matching an exact ground truth) counts as 1.
template <typename Distance>
typename Distance::ResultType computeDistanceRaport(const Matrix<typename Distance::ElementType>& inputData, typename Distance::ElementType* target,
                                                    int* neighbors, int* groundTruth, int veclen, int n, const Distance& distance)
{
    typedef typename Distance::ResultType DistanceType;

    DistanceType ret = 0;
    for (int i=0; i<n; ++i) {
        DistanceType den = distance(inputData[groundTruth[i]], target, veclen);
        DistanceType num = distance(inputData[neighbors[i]], target, veclen);

        if ((den==0)&&(num==0)) {
            // Both the found and the true neighbor coincide with the target.
            ret += 1;
        }
        else {
            ret += num/den;
        }
    }

    return ret;
}
+
+template <typename Distance>
+float search_with_ground_truth(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
+ const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches, int nn, int checks,
+ float& time, typename Distance::ResultType& dist, const Distance& distance, int skipMatches)
+{
+ typedef typename Distance::ResultType DistanceType;
+
+ if (matches.cols<size_t(nn)) {
+ Logger::info("matches.cols=%d, nn=%d\n",matches.cols,nn);
+
+ throw FLANNException("Ground truth is not computed for as many neighbors as requested");
+ }
+
+ KNNResultSet<DistanceType> resultSet(nn+skipMatches);
+ SearchParams searchParams(checks);
+
+ std::vector<int> indices(nn+skipMatches);
+ std::vector<DistanceType> dists(nn+skipMatches);
+ int* neighbors = &indices[skipMatches];
+
+ int correct = 0;
+ DistanceType distR = 0;
+ StartStopTimer t;
+ int repeats = 0;
+ while (t.value<0.2) {
+ repeats++;
+ t.start();
+ correct = 0;
+ distR = 0;
+ for (size_t i = 0; i < testData.rows; i++) {
+ resultSet.init(&indices[0], &dists[0]);
+ index.findNeighbors(resultSet, testData[i], searchParams);
+
+ correct += countCorrectMatches(neighbors,matches[i], nn);
+ distR += computeDistanceRaport<Distance>(inputData, testData[i], neighbors, matches[i], (int)testData.cols, nn, distance);
+ }
+ t.stop();
+ }
+ time = float(t.value/repeats);
+
+ float precicion = (float)correct/(nn*testData.rows);
+
+ dist = distR/(testData.rows*nn);
+
+ Logger::info("%8d %10.4g %10.5g %10.5g %10.5g\n",
+ checks, precicion, time, 1000.0 * time / testData.rows, dist);
+
+ return precicion;
+}
+
+
+template <typename Distance>
+float test_index_checks(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
+ const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,
+ int checks, float& precision, const Distance& distance, int nn = 1, int skipMatches = 0)
+{
+ typedef typename Distance::ResultType DistanceType;
+
+ Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
+ Logger::info("---------------------------------------------------------\n");
+
+ float time = 0;
+ DistanceType dist = 0;
+ precision = search_with_ground_truth(index, inputData, testData, matches, nn, checks, time, dist, distance, skipMatches);
+
+ return time;
+}
+
// Finds the smallest `checks` budget that reaches the requested precision:
// first doubles `checks` until the precision is exceeded, then bisects between
// the last two budgets. Outputs the chosen budget in `checks`; returns the
// search time measured at that budget.
template <typename Distance>
float test_index_precision(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
                           const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,
                           float precision, int& checks, const Distance& distance, int nn = 1, int skipMatches = 0)
{
    typedef typename Distance::ResultType DistanceType;
    const float SEARCH_EPS = 0.001f;

    Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
    Logger::info("---------------------------------------------------------\n");

    int c2 = 1;
    float p2;
    int c1 = 1;
    //float p1;
    float time;
    DistanceType dist;

    p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);

    // A single check already meets the target: nothing to tune.
    if (p2>precision) {
        Logger::info("Got as close as I can\n");
        checks = c2;
        return time;
    }

    // Exponential phase: double the budget until the precision is reached.
    while (p2<precision) {
        c1 = c2;
        //p1 = p2;
        c2 *=2;
        p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
    }

    int cx;
    float realPrecision;
    if (fabs(p2-precision)>SEARCH_EPS) {
        Logger::info("Start linear estimation\n");
        // after we got to values in the vecinity of the desired precision
        // use linear approximation get a better estimation
        // Bisection between c1 (below target) and c2 (above target).
        cx = (c1+c2)/2;
        realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
        while (fabs(realPrecision-precision)>SEARCH_EPS) {

            if (realPrecision<precision) {
                c1 = cx;
            }
            else {
                c2 = cx;
            }
            cx = (c1+c2)/2;
            // Interval collapsed to a single integer budget: stop.
            if (cx==c1) {
                Logger::info("Got as close as I can\n");
                break;
            }
            realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
        }

        c2 = cx;
        p2 = realPrecision;

    }
    else {
        Logger::info("No need for linear estimation\n");
        cx = c2;
        realPrecision = p2;
    }

    checks = cx;
    return time;
}
+
+
+template <typename Distance>
+void test_index_precisions(NNIndex<Distance>& index, const Matrix<typename Distance::ElementType>& inputData,
+ const Matrix<typename Distance::ElementType>& testData, const Matrix<int>& matches,
+ float* precisions, int precisions_length, const Distance& distance, int nn = 1, int skipMatches = 0, float maxTime = 0)
+{
+ typedef typename Distance::ResultType DistanceType;
+
+ const float SEARCH_EPS = 0.001;
+
+ // make sure precisions array is sorted
+ std::sort(precisions, precisions+precisions_length);
+
+ int pindex = 0;
+ float precision = precisions[pindex];
+
+ Logger::info(" Nodes Precision(%) Time(s) Time/vec(ms) Mean dist\n");
+ Logger::info("---------------------------------------------------------\n");
+
+ int c2 = 1;
+ float p2;
+
+ int c1 = 1;
+ float p1;
+
+ float time;
+ DistanceType dist;
+
+ p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
+
+ // if precision for 1 run down the tree is already
+ // better then some of the requested precisions, then
+ // skip those
+ while (precisions[pindex]<p2 && pindex<precisions_length) {
+ pindex++;
+ }
+
+ if (pindex==precisions_length) {
+ Logger::info("Got as close as I can\n");
+ return;
+ }
+
+ for (int i=pindex; i<precisions_length; ++i) {
+
+ precision = precisions[i];
+ while (p2<precision) {
+ c1 = c2;
+ p1 = p2;
+ c2 *=2;
+ p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
+ if ((maxTime> 0)&&(time > maxTime)&&(p2<precision)) return;
+ }
+
+ int cx;
+ float realPrecision;
+ if (fabs(p2-precision)>SEARCH_EPS) {
+ Logger::info("Start linear estimation\n");
+ // after we got to values in the vecinity of the desired precision
+ // use linear approximation get a better estimation
+
+ cx = (c1+c2)/2;
+ realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
+ while (fabs(realPrecision-precision)>SEARCH_EPS) {
+
+ if (realPrecision<precision) {
+ c1 = cx;
+ }
+ else {
+ c2 = cx;
+ }
+ cx = (c1+c2)/2;
+ if (cx==c1) {
+ Logger::info("Got as close as I can\n");
+ break;
+ }
+ realPrecision = search_with_ground_truth(index, inputData, testData, matches, nn, cx, time, dist, distance, skipMatches);
+ }
+
+ c2 = cx;
+ p2 = realPrecision;
+
+ }
+ else {
+ Logger::info("No need for linear estimation\n");
+ cx = c2;
+ realPrecision = p2;
+ }
+
+ }
+}
+
+}
+
+#endif //OPENCV_FLANN_INDEX_TESTING_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_index.h
new file mode 100644
index 00000000..1b8af4a5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_index.h
@@ -0,0 +1,628 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_KDTREE_INDEX_H_
+#define OPENCV_FLANN_KDTREE_INDEX_H_
+
+#include <algorithm>
+#include <map>
+#include <cassert>
+#include <cstring>
+
+#include "general.h"
+#include "nn_index.h"
+#include "dynamic_bitset.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+
+
+namespace cvflann
+{
+
// Parameter bag for KDTreeIndex.
// trees: number of randomized kd-trees to build (all searched in parallel).
struct KDTreeIndexParams : public IndexParams
{
    KDTreeIndexParams(int trees = 4)
    {
        (*this)["algorithm"] = FLANN_INDEX_KDTREE;
        (*this)["trees"] = trees;
    }
};
+
+
+/**
+ * Randomized kd-tree index
+ *
+ * Contains the k-d trees and other information for indexing a set of points
+ * for nearest-neighbor matching.
+ */
+template <typename Distance>
+class KDTreeIndex : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+
    /**
     * KDTree constructor
     *
     * Params:
     *     inputData = dataset with the input features
     *     params = parameters passed to the kdtree algorithm ("trees" read here)
     *     d = distance functor used for all comparisons
     */
    KDTreeIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KDTreeIndexParams(),
                Distance d = Distance() ) :
        dataset_(inputData), index_params_(params), distance_(d)
    {
        size_ = dataset_.rows;
        veclen_ = dataset_.cols;

        trees_ = get_param(index_params_,"trees",4);
        tree_roots_ = new NodePtr[trees_];

        // Create a permutable array of indices to the input vectors.
        vind_.resize(size_);
        for (size_t i = 0; i < size_; ++i) {
            vind_[i] = int(i);
        }

        // Scratch buffers reused by meanSplit() for every node built.
        mean_ = new DistanceType[veclen_];
        var_ = new DistanceType[veclen_];
    }
+
+
+ KDTreeIndex(const KDTreeIndex&);
+ KDTreeIndex& operator=(const KDTreeIndex&);
+
    /**
     * Standard destructor. Frees the root-pointer array and the split scratch
     * buffers; the tree nodes themselves are owned by pool_ and are released
     * together with it.
     */
    ~KDTreeIndex()
    {
        if (tree_roots_!=NULL) {
            delete[] tree_roots_;
        }
        delete[] mean_;
        delete[] var_;
    }
+
    /**
     * Dummy implementation for other algorithms of addable indexes after that.
     */
    void addIndex(const Matrix<ElementType>& /*wholeData*/, const Matrix<ElementType>& /*additionalData*/)
    {
        // Intentionally empty: KDTreeIndex does not support incremental addition.
    }
+
    /**
     * Builds the index: one randomized kd-tree per requested tree, each built
     * over a freshly shuffled permutation of the point indices.
     */
    void buildIndex()
    {
        /* Construct the randomized trees. */
        for (int i = 0; i < trees_; i++) {
            /* Randomize the order of vectors to allow for unbiased sampling. */
            // NOTE(review): std::random_shuffle was deprecated in C++14 and
            // removed in C++17; switch to std::shuffle with an explicit URNG
            // when upgrading the toolchain — confirm build standard.
            std::random_shuffle(vind_.begin(), vind_.end());
            tree_roots_[i] = divideTree(&vind_[0], int(size_) );
        }
    }
+
+
    // Algorithm identifier used by the save/load machinery.
    flann_algorithm_t getType() const
    {
        return FLANN_INDEX_KDTREE;
    }
+
+
    // Serializes the tree count followed by each tree (pre-order raw Node dumps).
    void saveIndex(FILE* stream)
    {
        save_value(stream, trees_);
        for (int i=0; i<trees_; ++i) {
            save_tree(stream, tree_roots_[i]);
        }
    }
+
+
+
+ void loadIndex(FILE* stream)
+ {
+ load_value(stream, trees_);
+ if (tree_roots_!=NULL) {
+ delete[] tree_roots_;
+ }
+ tree_roots_ = new NodePtr[trees_];
+ for (int i=0; i<trees_; ++i) {
+ load_tree(stream,tree_roots_[i]);
+ }
+
+ index_params_["algorithm"] = getType();
+ index_params_["trees"] = tree_roots_;
+ }
+
    /**
     * Returns size of index (number of indexed feature vectors).
     */
    size_t size() const
    {
        return size_;
    }
+
    /**
     * Returns the length (dimensionality) of an index feature.
     */
    size_t veclen() const
    {
        return veclen_;
    }
+
    /**
     * Computes the index memory usage
     * Returns: memory used by the index
     */
    int usedMemory() const
    {
        return int(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int));  // pool memory and vind array memory
    }
+
    /**
     * Find set of nearest neighbors to vec. Their indices are stored inside
     * the result object.
     *
     * Params:
     *     result = the result object in which the indices of the nearest-neighbors are stored
     *     vec = the vector for which to search the nearest neighbors
     *     searchParams = search options; "checks" bounds the best-bin-first
     *                    restarts and "eps" relaxes the pruning bound
     */
    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
    {
        int maxChecks = get_param(searchParams,"checks", 32);
        float epsError = 1+get_param(searchParams,"eps",0.0f);

        // Unlimited checks -> exact (full-traversal) search, otherwise approximate.
        if (maxChecks==FLANN_CHECKS_UNLIMITED) {
            getExactNeighbors(result, vec, epsError);
        }
        else {
            getNeighbors(result, vec, maxChecks, epsError);
        }
    }
+
    // Returns a copy of the parameters this index was built with.
    IndexParams getParameters() const
    {
        return index_params_;
    }
+
+private:
+
+
    /*--------------------- Internal Data Structures --------------------------*/
    // A node is a leaf iff child1 == child2 == NULL; for leaves, divfeat holds
    // the index of the stored vector rather than a split dimension.
    struct Node
    {
        /**
         * Dimension used for subdivision.
         */
        int divfeat;
        /**
         * The values used for subdivision.
         */
        DistanceType divval;
        /**
         * The child nodes.
         */
        Node* child1, * child2;
    };
    typedef Node* NodePtr;
    typedef BranchStruct<NodePtr, DistanceType> BranchSt;
    typedef BranchSt* Branch;
+
+
+
    // Pre-order raw dump of the subtree. Child pointers are written as-is and
    // rewritten on load; only their NULL/non-NULL distinction is meaningful.
    void save_tree(FILE* stream, NodePtr tree)
    {
        save_value(stream, *tree);
        if (tree->child1!=NULL) {
            save_tree(stream, tree->child1);
        }
        if (tree->child2!=NULL) {
            save_tree(stream, tree->child2);
        }
    }
+
+
    // Mirror of save_tree(): allocates each Node from the pool, restores it
    // from the stream, and recurses wherever a saved child pointer was non-NULL.
    void load_tree(FILE* stream, NodePtr& tree)
    {
        tree = pool_.allocate<Node>();
        load_value(stream, *tree);
        if (tree->child1!=NULL) {
            load_tree(stream, tree->child1);
        }
        if (tree->child2!=NULL) {
            load_tree(stream, tree->child2);
        }
    }
+
+
    /**
     * Create a tree node that subdivides the list of vectors ind[0..count-1].
     * The routine is called recursively on each sublist.
     *
     * Params:
     *     ind = indices of the vectors to subdivide (reordered in place)
     *     count = number of indices
     * Returns: the newly allocated (pool-owned) subtree root.
     */
    NodePtr divideTree(int* ind, int count)
    {
        NodePtr node = pool_.allocate<Node>(); // allocate memory

        /* If too few exemplars remain, then make this a leaf node. */
        if ( count == 1) {
            node->child1 = node->child2 = NULL;    /* Mark as leaf node. */
            node->divfeat = *ind;    /* Store index of this vec. */
        }
        else {
            int idx;
            int cutfeat;
            DistanceType cutval;
            meanSplit(ind, count, idx, cutfeat, cutval);

            node->divfeat = cutfeat;
            node->divval = cutval;
            node->child1 = divideTree(ind, idx);
            node->child2 = divideTree(ind+idx, count-idx);
        }

        return node;
    }
+
+
    /**
     * Choose which feature to use in order to subdivide this set of vectors.
     * Make a random choice among those with the highest variance, and use
     * its variance as the threshold value.
     *
     * Outputs: index = split position within ind, cutfeat = chosen dimension,
     * cutval = threshold (mean of cutfeat over the sample).
     */
    void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval)
    {
        memset(mean_,0,veclen_*sizeof(DistanceType));
        memset(var_,0,veclen_*sizeof(DistanceType));

        /* Compute mean values.  Only the first SAMPLE_MEAN values need to be
           sampled to get a good estimate.
         */
        int cnt = std::min((int)SAMPLE_MEAN+1, count);
        for (int j = 0; j < cnt; ++j) {
            ElementType* v = dataset_[ind[j]];
            for (size_t k=0; k<veclen_; ++k) {
                mean_[k] += v[k];
            }
        }
        for (size_t k=0; k<veclen_; ++k) {
            mean_[k] /= cnt;
        }

        /* Compute variances (no need to divide by count). */
        for (int j = 0; j < cnt; ++j) {
            ElementType* v = dataset_[ind[j]];
            for (size_t k=0; k<veclen_; ++k) {
                DistanceType dist = v[k] - mean_[k];
                var_[k] += dist * dist;
            }
        }
        /* Select one of the highest variance indices at random. */
        cutfeat = selectDivision(var_);
        cutval = mean_[cutfeat];

        int lim1, lim2;
        planeSplit(ind, count, cutfeat, cutval, lim1, lim2);

        // Prefer a split position that keeps the tree roughly balanced.
        if (lim1>count/2) index = lim1;
        else if (lim2<count/2) index = lim2;
        else index = count/2;

        /* If either list is empty, it means that all remaining features
         * are identical. Split in the middle to maintain a balanced tree.
         */
        if ((lim1==count)||(lim2==0)) index = count/2;
    }
+
+
    /**
     * Select the top RAND_DIM largest values from v and return the index of
     * one of these selected at random.
     *
     * NOTE(review): assumes veclen_ > 0; with an empty feature vector num stays
     * 0 and rand_int(0) is called — confirm callers never build over 0-d data.
     */
    int selectDivision(DistanceType* v)
    {
        int num = 0;
        size_t topind[RAND_DIM];

        /* Create a list of the indices of the top RAND_DIM values. */
        for (size_t i = 0; i < veclen_; ++i) {
            if ((num < RAND_DIM)||(v[i] > v[topind[num-1]])) {
                /* Put this element at end of topind. */
                if (num < RAND_DIM) {
                    topind[num++] = i;            /* Add to list. */
                }
                else {
                    topind[num-1] = i;            /* Replace last element. */
                }
                /* Bubble end value down to right location by repeated swapping. */
                int j = num - 1;
                while (j > 0 && v[topind[j]] > v[topind[j-1]]) {
                    std::swap(topind[j], topind[j-1]);
                    --j;
                }
            }
        }
        /* Select a random integer in range [0,num-1], and return that index. */
        int rnd = rand_int(num);
        return (int)topind[rnd];
    }
+
+
    /**
     * Subdivide the list of points by a plane perpendicular on axe corresponding
     * to the 'cutfeat' dimension at 'cutval' position.
     *
     * Two-pass in-place partition:
     *     dataset[ind[0..lim1-1]][cutfeat]<cutval
     *     dataset[ind[lim1..lim2-1]][cutfeat]==cutval
     *     dataset[ind[lim2..count]][cutfeat]>cutval
     */
    void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
    {
        /* Move vector indices for left subtree to front of list. */
        int left = 0;
        int right = count-1;
        for (;; ) {
            while (left<=right && dataset_[ind[left]][cutfeat]<cutval) ++left;
            while (left<=right && dataset_[ind[right]][cutfeat]>=cutval) --right;
            if (left>right) break;
            std::swap(ind[left], ind[right]); ++left; --right;
        }
        lim1 = left;
        // Second pass separates the ==cutval block from the >cutval block.
        right = count-1;
        for (;; ) {
            while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left;
            while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right;
            if (left>right) break;
            std::swap(ind[left], ind[right]); ++left; --right;
        }
        lim2 = left;
    }
+
    /**
     * Performs an exact nearest neighbor search. The exact search performs a full
     * traversal of the tree. Every tree indexes the same data, so a single tree
     * suffices; extra trees only add redundant work.
     */
    void getExactNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, float epsError)
    {
        //		checkID -= 1;  /* Set a different unique ID for each search. */

        if (trees_ > 1) {
            fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search");
        }
        if (trees_>0) {
            searchLevelExact(result, vec, tree_roots_[0], 0.0, epsError);
        }
        assert(result.full());
    }
+
    /**
     * Performs the approximate nearest-neighbor search. The search is approximate
     * because the tree traversal is abandoned after a given number of descends in
     * the tree.
     */
    void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError)
    {
        int i;
        BranchSt branch;

        int checkCount = 0;
        Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
        // One bit per dataset point: prevents re-scoring a point reached
        // through several of the randomized trees.
        DynamicBitset checked(size_);

        /* Search once through each tree down to root. */
        for (i = 0; i < trees_; ++i) {
            searchLevel(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
        }

        /* Keep searching other branches from heap until finished. */
        while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
            searchLevel(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
        }

        delete heap;

        assert(result.full());
    }
+
+
    /**
     * Search starting from a given node of the tree.  Based on any mismatches at
     * higher levels, all exemplars below this level must have a distance of
     * at least "mindistsq".
     */
    void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck,
                     float epsError, Heap<BranchSt>* heap, DynamicBitset& checked)
    {
        // Prune: nothing under this node can beat the current worst result.
        if (result_set.worstDist()<mindist) {
            //			printf("Ignoring branch, too far\n");
            return;
        }

        /* If this is a leaf node, then do check and return. */
        if ((node->child1 == NULL)&&(node->child2 == NULL)) {
            /*  Do not check same node more than once when searching multiple trees.
                Once a vector is checked, we set its location in vind to the
                current checkID.
             */
            int index = node->divfeat;
            if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return;
            checked.set(index);
            checkCount++;

            DistanceType dist = distance_(dataset_[index], vec, veclen_);
            result_set.addPoint(dist,index);

            return;
        }

        /* Which child branch should be taken first? */
        ElementType val = vec[node->divfeat];
        DistanceType diff = val - node->divval;
        NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
        NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;

        /* Create a branch record for the branch not taken.  Add distance
            of this feature boundary (we don't attempt to correct for any
            use of this feature in a parent node, which is unlikely to
            happen and would have only a small effect).  Don't bother
            adding more branches to heap after halfway point, as cost of
            adding exceeds their value.
         */

        DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
        //		if (2 * checkCount < maxCheck  ||  !result.full()) {
        // epsError > 1 relaxes the bound, trading accuracy for speed.
        if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) {
            heap->insert( BranchSt(otherChild, new_distsq) );
        }

        /* Call recursively to search next level down. */
        searchLevel(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked);
    }
+
    /**
     * Performs an exact search in the tree starting from a node: always descends
     * the near side first, then the far side whenever its bound (scaled by
     * epsError) could still improve the current worst result.
     */
    void searchLevelExact(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError)
    {
        /* If this is a leaf node, then do check and return. */
        if ((node->child1 == NULL)&&(node->child2 == NULL)) {
            int index = node->divfeat;
            DistanceType dist = distance_(dataset_[index], vec, veclen_);
            result_set.addPoint(dist,index);
            return;
        }

        /* Which child branch should be taken first? */
        ElementType val = vec[node->divfeat];
        DistanceType diff = val - node->divval;
        NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
        NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;

        /* Create a branch record for the branch not taken.  Add distance
            of this feature boundary (we don't attempt to correct for any
            use of this feature in a parent node, which is unlikely to
            happen and would have only a small effect).  Don't bother
            adding more branches to heap after halfway point, as cost of
            adding exceeds their value.
         */

        DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);

        /* Call recursively to search next level down. */
        searchLevelExact(result_set, vec, bestChild, mindist, epsError);

        // Far side is only visited if it can still contain a closer point.
        if (new_distsq*epsError<=result_set.worstDist()) {
            searchLevelExact(result_set, vec, otherChild, new_distsq, epsError);
        }
    }
+
+
+private:
+
+ enum
+ {
+ /**
+ * To improve efficiency, only SAMPLE_MEAN random values are used to
+ * compute the mean and variance at each level when building a tree.
+ * A value of 100 seems to perform as well as using all values.
+ */
+ SAMPLE_MEAN = 100,
+ /**
+ * Top random dimensions to consider
+ *
+ * When creating random trees, the dimension on which to subdivide is
+ * selected at random from among the top RAND_DIM dimensions with the
+ * highest variance. A value of 5 works well.
+ */
+ RAND_DIM=5
+ };
+
+
+ /**
+ * Number of randomized trees that are used
+ */
+ int trees_;
+
+ /**
+ * Array of indices to vectors in the dataset.
+ */
+ std::vector<int> vind_;
+
+ /**
+ * The dataset used by this index
+ */
+ const Matrix<ElementType> dataset_;
+
+ IndexParams index_params_;
+
+ size_t size_;
+ size_t veclen_;
+
+
+ DistanceType* mean_;
+ DistanceType* var_;
+
+
+ /**
+ * Array of k-d trees used to find neighbours.
+ */
+ NodePtr* tree_roots_;
+
+ /**
+ * Pooled memory allocator.
+ *
+ * Using a pooled memory allocator is more efficient
+ * than allocating memory directly when there is a large
+ * number small of memory allocations.
+ */
+ PooledAllocator pool_;
+
+ Distance distance_;
+
+
+}; // class KDTreeForest
+
+}
+
+#endif //OPENCV_FLANN_KDTREE_INDEX_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h
new file mode 100644
index 00000000..252fc4c5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kdtree_single_index.h
@@ -0,0 +1,641 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_
+#define OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_
+
+#include <algorithm>
+#include <map>
+#include <cassert>
+#include <cstring>
+
+#include "general.h"
+#include "nn_index.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+
+namespace cvflann
+{
+
+struct KDTreeSingleIndexParams : public IndexParams
+{
+ KDTreeSingleIndexParams(int leaf_max_size = 10, bool reorder = true, int dim = -1)
+ {
+ (*this)["algorithm"] = FLANN_INDEX_KDTREE_SINGLE;
+ (*this)["leaf_max_size"] = leaf_max_size;
+ (*this)["reorder"] = reorder;
+ (*this)["dim"] = dim;
+ }
+};
+
+
+/**
+ * Randomized kd-tree index
+ *
+ * Contains the k-d trees and other information for indexing a set of points
+ * for nearest-neighbor matching.
+ */
+template <typename Distance>
+class KDTreeSingleIndex : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+
+ /**
+ * KDTree constructor
+ *
+ * Params:
+ * inputData = dataset with the input features
+ * params = parameters passed to the kdtree algorithm
+ */
+ KDTreeSingleIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KDTreeSingleIndexParams(),
+ Distance d = Distance() ) :
+ dataset_(inputData), index_params_(params), distance_(d)
+ {
+ size_ = dataset_.rows;
+ dim_ = dataset_.cols;
+ int dim_param = get_param(params,"dim",-1);
+ if (dim_param>0) dim_ = dim_param;
+ leaf_max_size_ = get_param(params,"leaf_max_size",10);
+ reorder_ = get_param(params,"reorder",true);
+
+ // Create a permutable array of indices to the input vectors.
+ vind_.resize(size_);
+ for (size_t i = 0; i < size_; i++) {
+ vind_[i] = (int)i;
+ }
+ }
+
+ KDTreeSingleIndex(const KDTreeSingleIndex&);
+ KDTreeSingleIndex& operator=(const KDTreeSingleIndex&);
+
+ /**
+ * Standard destructor
+ */
+ ~KDTreeSingleIndex()
+ {
+ if (reorder_) delete[] data_.data;
+ }
+
+ /**
+ * Dummy implementation for other algorithms of addable indexes after that.
+ */
+ void addIndex(const Matrix<ElementType>& /*wholeData*/, const Matrix<ElementType>& /*additionalData*/)
+ {
+ }
+
+ /**
+ * Builds the index
+ */
+ void buildIndex()
+ {
+ computeBoundingBox(root_bbox_);
+ root_node_ = divideTree(0, (int)size_, root_bbox_ ); // construct the tree
+
+ if (reorder_) {
+ delete[] data_.data;
+ data_ = cvflann::Matrix<ElementType>(new ElementType[size_*dim_], size_, dim_);
+ for (size_t i=0; i<size_; ++i) {
+ for (size_t j=0; j<dim_; ++j) {
+ data_[i][j] = dataset_[vind_[i]][j];
+ }
+ }
+ }
+ else {
+ data_ = dataset_;
+ }
+ }
+
+ flann_algorithm_t getType() const
+ {
+ return FLANN_INDEX_KDTREE_SINGLE;
+ }
+
+
+ void saveIndex(FILE* stream)
+ {
+ save_value(stream, size_);
+ save_value(stream, dim_);
+ save_value(stream, root_bbox_);
+ save_value(stream, reorder_);
+ save_value(stream, leaf_max_size_);
+ save_value(stream, vind_);
+ if (reorder_) {
+ save_value(stream, data_);
+ }
+ save_tree(stream, root_node_);
+ }
+
+
+ void loadIndex(FILE* stream)
+ {
+ load_value(stream, size_);
+ load_value(stream, dim_);
+ load_value(stream, root_bbox_);
+ load_value(stream, reorder_);
+ load_value(stream, leaf_max_size_);
+ load_value(stream, vind_);
+ if (reorder_) {
+ load_value(stream, data_);
+ }
+ else {
+ data_ = dataset_;
+ }
+ load_tree(stream, root_node_);
+
+
+ index_params_["algorithm"] = getType();
+ index_params_["leaf_max_size"] = leaf_max_size_;
+ index_params_["reorder"] = reorder_;
+ }
+
+ /**
+ * Returns size of index.
+ */
+ size_t size() const
+ {
+ return size_;
+ }
+
+ /**
+ * Returns the length of an index feature.
+ */
+ size_t veclen() const
+ {
+ return dim_;
+ }
+
+ /**
+ * Computes the inde memory usage
+ * Returns: memory used by the index
+ */
+ int usedMemory() const
+ {
+ return (int)(pool_.usedMemory+pool_.wastedMemory+dataset_.rows*sizeof(int)); // pool memory and vind array memory
+ }
+
+
+ /**
+ * \brief Perform k-nearest neighbor search
+ * \param[in] queries The query points for which to find the nearest neighbors
+ * \param[out] indices The indices of the nearest neighbors found
+ * \param[out] dists Distances to the nearest neighbors found
+ * \param[in] knn Number of nearest neighbors to return
+ * \param[in] params Search parameters
+ */
+ void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+ {
+ assert(queries.cols == veclen());
+ assert(indices.rows >= queries.rows);
+ assert(dists.rows >= queries.rows);
+ assert(int(indices.cols) >= knn);
+ assert(int(dists.cols) >= knn);
+
+ KNNSimpleResultSet<DistanceType> resultSet(knn);
+ for (size_t i = 0; i < queries.rows; i++) {
+ resultSet.init(indices[i], dists[i]);
+ findNeighbors(resultSet, queries[i], params);
+ }
+ }
+
+ IndexParams getParameters() const
+ {
+ return index_params_;
+ }
+
+ /**
+ * Find set of nearest neighbors to vec. Their indices are stored inside
+ * the result object.
+ *
+ * Params:
+ * result = the result object in which the indices of the nearest-neighbors are stored
+ * vec = the vector for which to search the nearest neighbors
+ * maxCheck = the maximum number of restarts (in a best-bin-first manner)
+ */
+ void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+ {
+ float epsError = 1+get_param(searchParams,"eps",0.0f);
+
+ std::vector<DistanceType> dists(dim_,0);
+ DistanceType distsq = computeInitialDistances(vec, dists);
+ searchLevel(result, vec, root_node_, distsq, dists, epsError);
+ }
+
+private:
+
+
+ /*--------------------- Internal Data Structures --------------------------*/
+ struct Node
+ {
+ /**
+ * Indices of points in leaf node
+ */
+ int left, right;
+ /**
+ * Dimension used for subdivision.
+ */
+ int divfeat;
+ /**
+ * The values used for subdivision.
+ */
+ DistanceType divlow, divhigh;
+ /**
+ * The child nodes.
+ */
+ Node* child1, * child2;
+ };
+ typedef Node* NodePtr;
+
+
+ struct Interval
+ {
+ DistanceType low, high;
+ };
+
+ typedef std::vector<Interval> BoundingBox;
+
+ typedef BranchStruct<NodePtr, DistanceType> BranchSt;
+ typedef BranchSt* Branch;
+
+
+
+
+ void save_tree(FILE* stream, NodePtr tree)
+ {
+ save_value(stream, *tree);
+ if (tree->child1!=NULL) {
+ save_tree(stream, tree->child1);
+ }
+ if (tree->child2!=NULL) {
+ save_tree(stream, tree->child2);
+ }
+ }
+
+
+ void load_tree(FILE* stream, NodePtr& tree)
+ {
+ tree = pool_.allocate<Node>();
+ load_value(stream, *tree);
+ if (tree->child1!=NULL) {
+ load_tree(stream, tree->child1);
+ }
+ if (tree->child2!=NULL) {
+ load_tree(stream, tree->child2);
+ }
+ }
+
+
+ void computeBoundingBox(BoundingBox& bbox)
+ {
+ bbox.resize(dim_);
+ for (size_t i=0; i<dim_; ++i) {
+ bbox[i].low = (DistanceType)dataset_[0][i];
+ bbox[i].high = (DistanceType)dataset_[0][i];
+ }
+ for (size_t k=1; k<dataset_.rows; ++k) {
+ for (size_t i=0; i<dim_; ++i) {
+ if (dataset_[k][i]<bbox[i].low) bbox[i].low = (DistanceType)dataset_[k][i];
+ if (dataset_[k][i]>bbox[i].high) bbox[i].high = (DistanceType)dataset_[k][i];
+ }
+ }
+ }
+
+
+ /**
+ * Create a tree node that subdivides the list of vecs from vind[first]
+ * to vind[last]. The routine is called recursively on each sublist.
+ * Place a pointer to this new tree node in the location pTree.
+ *
+ * Params: pTree = the new node to create
+ * first = index of the first vector
+ * last = index of the last vector
+ */
+ NodePtr divideTree(int left, int right, BoundingBox& bbox)
+ {
+ NodePtr node = pool_.allocate<Node>(); // allocate memory
+
+ /* If too few exemplars remain, then make this a leaf node. */
+ if ( (right-left) <= leaf_max_size_) {
+ node->child1 = node->child2 = NULL; /* Mark as leaf node. */
+ node->left = left;
+ node->right = right;
+
+ // compute bounding-box of leaf points
+ for (size_t i=0; i<dim_; ++i) {
+ bbox[i].low = (DistanceType)dataset_[vind_[left]][i];
+ bbox[i].high = (DistanceType)dataset_[vind_[left]][i];
+ }
+ for (int k=left+1; k<right; ++k) {
+ for (size_t i=0; i<dim_; ++i) {
+ if (bbox[i].low>dataset_[vind_[k]][i]) bbox[i].low=(DistanceType)dataset_[vind_[k]][i];
+ if (bbox[i].high<dataset_[vind_[k]][i]) bbox[i].high=(DistanceType)dataset_[vind_[k]][i];
+ }
+ }
+ }
+ else {
+ int idx;
+ int cutfeat;
+ DistanceType cutval;
+ middleSplit_(&vind_[0]+left, right-left, idx, cutfeat, cutval, bbox);
+
+ node->divfeat = cutfeat;
+
+ BoundingBox left_bbox(bbox);
+ left_bbox[cutfeat].high = cutval;
+ node->child1 = divideTree(left, left+idx, left_bbox);
+
+ BoundingBox right_bbox(bbox);
+ right_bbox[cutfeat].low = cutval;
+ node->child2 = divideTree(left+idx, right, right_bbox);
+
+ node->divlow = left_bbox[cutfeat].high;
+ node->divhigh = right_bbox[cutfeat].low;
+
+ for (size_t i=0; i<dim_; ++i) {
+ bbox[i].low = std::min(left_bbox[i].low, right_bbox[i].low);
+ bbox[i].high = std::max(left_bbox[i].high, right_bbox[i].high);
+ }
+ }
+
+ return node;
+ }
+
+ void computeMinMax(int* ind, int count, int dim, ElementType& min_elem, ElementType& max_elem)
+ {
+ min_elem = dataset_[ind[0]][dim];
+ max_elem = dataset_[ind[0]][dim];
+ for (int i=1; i<count; ++i) {
+ ElementType val = dataset_[ind[i]][dim];
+ if (val<min_elem) min_elem = val;
+ if (val>max_elem) max_elem = val;
+ }
+ }
+
+ void middleSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
+ {
+ // find the largest span from the approximate bounding box
+ ElementType max_span = bbox[0].high-bbox[0].low;
+ cutfeat = 0;
+ cutval = (bbox[0].high+bbox[0].low)/2;
+ for (size_t i=1; i<dim_; ++i) {
+ ElementType span = bbox[i].high-bbox[i].low;
+ if (span>max_span) {
+ max_span = span;
+ cutfeat = i;
+ cutval = (bbox[i].high+bbox[i].low)/2;
+ }
+ }
+
+ // compute exact span on the found dimension
+ ElementType min_elem, max_elem;
+ computeMinMax(ind, count, cutfeat, min_elem, max_elem);
+ cutval = (min_elem+max_elem)/2;
+ max_span = max_elem - min_elem;
+
+ // check if a dimension of a largest span exists
+ size_t k = cutfeat;
+ for (size_t i=0; i<dim_; ++i) {
+ if (i==k) continue;
+ ElementType span = bbox[i].high-bbox[i].low;
+ if (span>max_span) {
+ computeMinMax(ind, count, i, min_elem, max_elem);
+ span = max_elem - min_elem;
+ if (span>max_span) {
+ max_span = span;
+ cutfeat = i;
+ cutval = (min_elem+max_elem)/2;
+ }
+ }
+ }
+ int lim1, lim2;
+ planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
+
+ if (lim1>count/2) index = lim1;
+ else if (lim2<count/2) index = lim2;
+ else index = count/2;
+ }
+
+
+ void middleSplit_(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval, const BoundingBox& bbox)
+ {
+ const float EPS=0.00001f;
+ DistanceType max_span = bbox[0].high-bbox[0].low;
+ for (size_t i=1; i<dim_; ++i) {
+ DistanceType span = bbox[i].high-bbox[i].low;
+ if (span>max_span) {
+ max_span = span;
+ }
+ }
+ DistanceType max_spread = -1;
+ cutfeat = 0;
+ for (size_t i=0; i<dim_; ++i) {
+ DistanceType span = bbox[i].high-bbox[i].low;
+ if (span>(DistanceType)((1-EPS)*max_span)) {
+ ElementType min_elem, max_elem;
+ computeMinMax(ind, count, cutfeat, min_elem, max_elem);
+ DistanceType spread = (DistanceType)(max_elem-min_elem);
+ if (spread>max_spread) {
+ cutfeat = (int)i;
+ max_spread = spread;
+ }
+ }
+ }
+ // split in the middle
+ DistanceType split_val = (bbox[cutfeat].low+bbox[cutfeat].high)/2;
+ ElementType min_elem, max_elem;
+ computeMinMax(ind, count, cutfeat, min_elem, max_elem);
+
+ if (split_val<min_elem) cutval = (DistanceType)min_elem;
+ else if (split_val>max_elem) cutval = (DistanceType)max_elem;
+ else cutval = split_val;
+
+ int lim1, lim2;
+ planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
+
+ if (lim1>count/2) index = lim1;
+ else if (lim2<count/2) index = lim2;
+ else index = count/2;
+ }
+
+
+ /**
+ * Subdivide the list of points by a plane perpendicular on axe corresponding
+ * to the 'cutfeat' dimension at 'cutval' position.
+ *
+ * On return:
+ * dataset[ind[0..lim1-1]][cutfeat]<cutval
+ * dataset[ind[lim1..lim2-1]][cutfeat]==cutval
+ * dataset[ind[lim2..count]][cutfeat]>cutval
+ */
+ void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
+ {
+ /* Move vector indices for left subtree to front of list. */
+ int left = 0;
+ int right = count-1;
+ for (;; ) {
+ while (left<=right && dataset_[ind[left]][cutfeat]<cutval) ++left;
+ while (left<=right && dataset_[ind[right]][cutfeat]>=cutval) --right;
+ if (left>right) break;
+ std::swap(ind[left], ind[right]); ++left; --right;
+ }
+ /* If either list is empty, it means that all remaining features
+ * are identical. Split in the middle to maintain a balanced tree.
+ */
+ lim1 = left;
+ right = count-1;
+ for (;; ) {
+ while (left<=right && dataset_[ind[left]][cutfeat]<=cutval) ++left;
+ while (left<=right && dataset_[ind[right]][cutfeat]>cutval) --right;
+ if (left>right) break;
+ std::swap(ind[left], ind[right]); ++left; --right;
+ }
+ lim2 = left;
+ }
+
+ DistanceType computeInitialDistances(const ElementType* vec, std::vector<DistanceType>& dists)
+ {
+ DistanceType distsq = 0.0;
+
+ for (size_t i = 0; i < dim_; ++i) {
+ if (vec[i] < root_bbox_[i].low) {
+ dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].low, (int)i);
+ distsq += dists[i];
+ }
+ if (vec[i] > root_bbox_[i].high) {
+ dists[i] = distance_.accum_dist(vec[i], root_bbox_[i].high, (int)i);
+ distsq += dists[i];
+ }
+ }
+
+ return distsq;
+ }
+
+ /**
+ * Performs an exact search in the tree starting from a node.
+ */
+ void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindistsq,
+ std::vector<DistanceType>& dists, const float epsError)
+ {
+ /* If this is a leaf node, then do check and return. */
+ if ((node->child1 == NULL)&&(node->child2 == NULL)) {
+ DistanceType worst_dist = result_set.worstDist();
+ for (int i=node->left; i<node->right; ++i) {
+ int index = reorder_ ? i : vind_[i];
+ DistanceType dist = distance_(vec, data_[index], dim_, worst_dist);
+ if (dist<worst_dist) {
+ result_set.addPoint(dist,vind_[i]);
+ }
+ }
+ return;
+ }
+
+ /* Which child branch should be taken first? */
+ int idx = node->divfeat;
+ ElementType val = vec[idx];
+ DistanceType diff1 = val - node->divlow;
+ DistanceType diff2 = val - node->divhigh;
+
+ NodePtr bestChild;
+ NodePtr otherChild;
+ DistanceType cut_dist;
+ if ((diff1+diff2)<0) {
+ bestChild = node->child1;
+ otherChild = node->child2;
+ cut_dist = distance_.accum_dist(val, node->divhigh, idx);
+ }
+ else {
+ bestChild = node->child2;
+ otherChild = node->child1;
+ cut_dist = distance_.accum_dist( val, node->divlow, idx);
+ }
+
+ /* Call recursively to search next level down. */
+ searchLevel(result_set, vec, bestChild, mindistsq, dists, epsError);
+
+ DistanceType dst = dists[idx];
+ mindistsq = mindistsq + cut_dist - dst;
+ dists[idx] = cut_dist;
+ if (mindistsq*epsError<=result_set.worstDist()) {
+ searchLevel(result_set, vec, otherChild, mindistsq, dists, epsError);
+ }
+ dists[idx] = dst;
+ }
+
+private:
+
+ /**
+ * The dataset used by this index
+ */
+ const Matrix<ElementType> dataset_;
+
+ IndexParams index_params_;
+
+ int leaf_max_size_;
+ bool reorder_;
+
+
+ /**
+ * Array of indices to vectors in the dataset.
+ */
+ std::vector<int> vind_;
+
+ Matrix<ElementType> data_;
+
+ size_t size_;
+ size_t dim_;
+
+ /**
+ * Array of k-d trees used to find neighbours.
+ */
+ NodePtr root_node_;
+
+ BoundingBox root_bbox_;
+
+ /**
+ * Pooled memory allocator.
+ *
+ * Using a pooled memory allocator is more efficient
+ * than allocating memory directly when there is a large
+ * number small of memory allocations.
+ */
+ PooledAllocator pool_;
+
+ Distance distance_;
+}; // class KDTree
+
+}
+
+#endif //OPENCV_FLANN_KDTREE_SINGLE_INDEX_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h
new file mode 100644
index 00000000..e119ceb2
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/kmeans_index.h
@@ -0,0 +1,1133 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_KMEANS_INDEX_H_
+#define OPENCV_FLANN_KMEANS_INDEX_H_
+
+#include <algorithm>
+#include <string>
+#include <map>
+#include <cassert>
+#include <limits>
+#include <cmath>
+
+#include "general.h"
+#include "nn_index.h"
+#include "dist.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+#include "logger.h"
+
+
+namespace cvflann
+{
+
+struct KMeansIndexParams : public IndexParams
+{
+ KMeansIndexParams(int branching = 32, int iterations = 11,
+ flann_centers_init_t centers_init = FLANN_CENTERS_RANDOM, float cb_index = 0.2 )
+ {
+ (*this)["algorithm"] = FLANN_INDEX_KMEANS;
+ // branching factor
+ (*this)["branching"] = branching;
+ // max iterations to perform in one kmeans clustering (kmeans tree)
+ (*this)["iterations"] = iterations;
+ // algorithm used for picking the initial cluster centers for kmeans tree
+ (*this)["centers_init"] = centers_init;
+ // cluster boundary index. Used when searching the kmeans tree
+ (*this)["cb_index"] = cb_index;
+ }
+};
+
+
+/**
+ * Hierarchical kmeans index
+ *
+ * Contains a tree constructed through a hierarchical kmeans clustering
+ * and other information for indexing a set of points for nearest-neighbour matching.
+ */
+template <typename Distance>
+class KMeansIndex : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+
+
+ typedef void (KMeansIndex::* centersAlgFunction)(int, int*, int, int*, int&);
+
+ /**
+ * The function used for choosing the cluster centers.
+ */
+ centersAlgFunction chooseCenters;
+
+
+
+ /**
+ * Chooses the initial centers in the k-means clustering in a random manner.
+ *
+ * Params:
+ * k = number of centers
+ * vecs = the dataset of points
+ * indices = indices in the dataset
+ * indices_length = length of indices vector
+ *
+ */
+ void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length)
+ {
+ UniqueRandom r(indices_length);
+
+ int index;
+ for (index=0; index<k; ++index) {
+ bool duplicate = true;
+ int rnd;
+ while (duplicate) {
+ duplicate = false;
+ rnd = r.next();
+ if (rnd<0) {
+ centers_length = index;
+ return;
+ }
+
+ centers[index] = indices[rnd];
+
+ for (int j=0; j<index; ++j) {
+ DistanceType sq = distance_(dataset_[centers[index]], dataset_[centers[j]], dataset_.cols);
+ if (sq<1e-16) {
+ duplicate = true;
+ }
+ }
+ }
+ }
+
+ centers_length = index;
+ }
+
+
+ /**
+ * Chooses the initial centers in the k-means using Gonzales' algorithm
+ * so that the centers are spaced apart from each other.
+ *
+ * Params:
+ * k = number of centers
+ * vecs = the dataset of points
+ * indices = indices in the dataset
+ * Returns:
+ */
+ void chooseCentersGonzales(int k, int* indices, int indices_length, int* centers, int& centers_length)
+ {
+ int n = indices_length;
+
+ int rnd = rand_int(n);
+ assert(rnd >=0 && rnd < n);
+
+ centers[0] = indices[rnd];
+
+ int index;
+ for (index=1; index<k; ++index) {
+
+ int best_index = -1;
+ DistanceType best_val = 0;
+ for (int j=0; j<n; ++j) {
+ DistanceType dist = distance_(dataset_[centers[0]],dataset_[indices[j]],dataset_.cols);
+ for (int i=1; i<index; ++i) {
+ DistanceType tmp_dist = distance_(dataset_[centers[i]],dataset_[indices[j]],dataset_.cols);
+ if (tmp_dist<dist) {
+ dist = tmp_dist;
+ }
+ }
+ if (dist>best_val) {
+ best_val = dist;
+ best_index = j;
+ }
+ }
+ if (best_index!=-1) {
+ centers[index] = indices[best_index];
+ }
+ else {
+ break;
+ }
+ }
+ centers_length = index;
+ }
+
+
+ /**
+ * Chooses the initial centers in the k-means using the algorithm
+ * proposed in the KMeans++ paper:
+ * Arthur, David; Vassilvitskii, Sergei - k-means++: The Advantages of Careful Seeding
+ *
+ * Implementation of this function was converted from the one provided in Arthur's code.
+ *
+ * Params:
+ * k = number of centers
+ * vecs = the dataset of points
+ * indices = indices in the dataset
+ * Returns:
+ */
+ void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length)
+ {
+ int n = indices_length;
+
+ double currentPot = 0;
+ DistanceType* closestDistSq = new DistanceType[n];
+
+ // Choose one random center and set the closestDistSq values
+ int index = rand_int(n);
+ assert(index >=0 && index < n);
+ centers[0] = indices[index];
+
+ for (int i = 0; i < n; i++) {
+ closestDistSq[i] = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols);
+ closestDistSq[i] = ensureSquareDistance<Distance>( closestDistSq[i] );
+ currentPot += closestDistSq[i];
+ }
+
+
+ const int numLocalTries = 1;
+
+ // Choose each center
+ int centerCount;
+ for (centerCount = 1; centerCount < k; centerCount++) {
+
+ // Repeat several trials
+ double bestNewPot = -1;
+ int bestNewIndex = -1;
+ for (int localTrial = 0; localTrial < numLocalTries; localTrial++) {
+
+ // Choose our center - have to be slightly careful to return a valid answer even accounting
+ // for possible rounding errors
+ double randVal = rand_double(currentPot);
+ for (index = 0; index < n-1; index++) {
+ if (randVal <= closestDistSq[index]) break;
+ else randVal -= closestDistSq[index];
+ }
+
+ // Compute the new potential
+ double newPot = 0;
+ for (int i = 0; i < n; i++) {
+ DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[index]], dataset_.cols);
+ newPot += std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );
+ }
+
+ // Store the best result
+ if ((bestNewPot < 0)||(newPot < bestNewPot)) {
+ bestNewPot = newPot;
+ bestNewIndex = index;
+ }
+ }
+
+ // Add the appropriate center
+ centers[centerCount] = indices[bestNewIndex];
+ currentPot = bestNewPot;
+ for (int i = 0; i < n; i++) {
+ DistanceType dist = distance_(dataset_[indices[i]], dataset_[indices[bestNewIndex]], dataset_.cols);
+ closestDistSq[i] = std::min( ensureSquareDistance<Distance>(dist), closestDistSq[i] );
+ }
+ }
+
+ centers_length = centerCount;
+
+ delete[] closestDistSq;
+ }
+
+
+
+public:
+
+ flann_algorithm_t getType() const
+ {
+ return FLANN_INDEX_KMEANS;
+ }
+
+ /**
+ * Index constructor
+ *
+ * Params:
+ * inputData = dataset with the input features
+ * params = parameters passed to the hierarchical k-means algorithm
+ */
+ KMeansIndex(const Matrix<ElementType>& inputData, const IndexParams& params = KMeansIndexParams(),
+ Distance d = Distance())
+ : dataset_(inputData), index_params_(params), root_(NULL), indices_(NULL), distance_(d)
+ {
+ memoryCounter_ = 0;
+
+ size_ = dataset_.rows;
+ veclen_ = dataset_.cols;
+
+ branching_ = get_param(params,"branching",32);
+ iterations_ = get_param(params,"iterations",11);
+ if (iterations_<0) {
+ iterations_ = (std::numeric_limits<int>::max)();
+ }
+ centers_init_ = get_param(params,"centers_init",FLANN_CENTERS_RANDOM);
+
+ if (centers_init_==FLANN_CENTERS_RANDOM) {
+ chooseCenters = &KMeansIndex::chooseCentersRandom;
+ }
+ else if (centers_init_==FLANN_CENTERS_GONZALES) {
+ chooseCenters = &KMeansIndex::chooseCentersGonzales;
+ }
+ else if (centers_init_==FLANN_CENTERS_KMEANSPP) {
+ chooseCenters = &KMeansIndex::chooseCentersKMeanspp;
+ }
+ else {
+ throw FLANNException("Unknown algorithm for choosing initial centers.");
+ }
+ cb_index_ = 0.4f;
+
+ }
+
+
+ KMeansIndex(const KMeansIndex&);
+ KMeansIndex& operator=(const KMeansIndex&);
+
+
+ /**
+ * Index destructor.
+ *
+ * Release the memory used by the index.
+ */
+ virtual ~KMeansIndex()
+ {
+ if (root_ != NULL) {
+ free_centers(root_);
+ }
+ if (indices_!=NULL) {
+ delete[] indices_;
+ }
+ }
+
+ /**
+ * Returns size of index.
+ */
+ size_t size() const
+ {
+ return size_;
+ }
+
+ /**
+ * Returns the length of an index feature.
+ */
+ size_t veclen() const
+ {
+ return veclen_;
+ }
+
+
+ void set_cb_index( float index)
+ {
+ cb_index_ = index;
+ }
+
+ /**
+ * Computes the inde memory usage
+ * Returns: memory used by the index
+ */
+ int usedMemory() const
+ {
+ return pool_.usedMemory+pool_.wastedMemory+memoryCounter_;
+ }
+
+ /**
+ * Dummy implementation for other algorithms of addable indexes after that.
+ */
+ void addIndex(const Matrix<ElementType>& /*wholeData*/, const Matrix<ElementType>& /*additionalData*/)
+ {
+ }
+
+ /**
+ * Builds the index
+ */
+ void buildIndex()
+ {
+ if (branching_<2) {
+ throw FLANNException("Branching factor must be at least 2");
+ }
+
+ indices_ = new int[size_];
+ for (size_t i=0; i<size_; ++i) {
+ indices_[i] = int(i);
+ }
+
+ root_ = pool_.allocate<KMeansNode>();
+ std::memset(root_, 0, sizeof(KMeansNode));
+
+ computeNodeStatistics(root_, indices_, (int)size_);
+ computeClustering(root_, indices_, (int)size_, branching_,0);
+ }
+
+
+ void saveIndex(FILE* stream)
+ {
+ save_value(stream, branching_);
+ save_value(stream, iterations_);
+ save_value(stream, memoryCounter_);
+ save_value(stream, cb_index_);
+ save_value(stream, *indices_, (int)size_);
+
+ save_tree(stream, root_);
+ }
+
+
+ void loadIndex(FILE* stream)
+ {
+ load_value(stream, branching_);
+ load_value(stream, iterations_);
+ load_value(stream, memoryCounter_);
+ load_value(stream, cb_index_);
+ if (indices_!=NULL) {
+ delete[] indices_;
+ }
+ indices_ = new int[size_];
+ load_value(stream, *indices_, size_);
+
+ if (root_!=NULL) {
+ free_centers(root_);
+ }
+ load_tree(stream, root_);
+
+ index_params_["algorithm"] = getType();
+ index_params_["branching"] = branching_;
+ index_params_["iterations"] = iterations_;
+ index_params_["centers_init"] = centers_init_;
+ index_params_["cb_index"] = cb_index_;
+
+ }
+
+
+ /**
+ * Find set of nearest neighbors to vec. Their indices are stored inside
+ * the result object.
+ *
+ * Params:
+ * result = the result object in which the indices of the nearest-neighbors are stored
+ * vec = the vector for which to search the nearest neighbors
+ * searchParams = parameters that influence the search algorithm (checks, cb_index)
+ */
+ void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams)
+ {
+
+ int maxChecks = get_param(searchParams,"checks",32);
+
+ if (maxChecks==FLANN_CHECKS_UNLIMITED) {
+ findExactNN(root_, result, vec);
+ }
+ else {
+ // Priority queue storing intermediate branches in the best-bin-first search
+ Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
+
+ int checks = 0;
+ findNN(root_, result, vec, checks, maxChecks, heap);
+
+ BranchSt branch;
+ while (heap->popMin(branch) && (checks<maxChecks || !result.full())) {
+ KMeansNodePtr node = branch.node;
+ findNN(node, result, vec, checks, maxChecks, heap);
+ }
+ assert(result.full());
+
+ delete heap;
+ }
+
+ }
+
+ /**
+ * Clustering function that takes a cut in the hierarchical k-means
+ * tree and return the clusters centers of that clustering.
+ * Params:
+ * numClusters = number of clusters to have in the clustering computed
+ * Returns: number of cluster centers
+ */
+ int getClusterCenters(Matrix<DistanceType>& centers)
+ {
+ int numClusters = centers.rows;
+ if (numClusters<1) {
+ throw FLANNException("Number of clusters must be at least 1");
+ }
+
+ DistanceType variance;
+ KMeansNodePtr* clusters = new KMeansNodePtr[numClusters];
+
+ int clusterCount = getMinVarianceClusters(root_, clusters, numClusters, variance);
+
+ Logger::info("Clusters requested: %d, returning %d\n",numClusters, clusterCount);
+
+ for (int i=0; i<clusterCount; ++i) {
+ DistanceType* center = clusters[i]->pivot;
+ for (size_t j=0; j<veclen_; ++j) {
+ centers[i][j] = center[j];
+ }
+ }
+ delete[] clusters;
+
+ return clusterCount;
+ }
+
+ IndexParams getParameters() const
+ {
+ return index_params_;
+ }
+
+
+private:
+ /**
+ * Struture representing a node in the hierarchical k-means tree.
+ */
+ struct KMeansNode
+ {
+ /**
+ * The cluster center.
+ */
+ DistanceType* pivot;
+ /**
+ * The cluster radius.
+ */
+ DistanceType radius;
+ /**
+ * The cluster mean radius.
+ */
+ DistanceType mean_radius;
+ /**
+ * The cluster variance.
+ */
+ DistanceType variance;
+ /**
+ * The cluster size (number of points in the cluster)
+ */
+ int size;
+ /**
+ * Child nodes (only for non-terminal nodes)
+ */
+ KMeansNode** childs;
+ /**
+ * Node points (only for terminal nodes)
+ */
+ int* indices;
+ /**
+ * Level
+ */
+ int level;
+ };
+ typedef KMeansNode* KMeansNodePtr;
+
+ /**
+ * Alias definition for a nicer syntax.
+ */
+ typedef BranchStruct<KMeansNodePtr, DistanceType> BranchSt;
+
+
+
+
+    /**
+     * Recursively serializes a subtree to 'stream'.
+     *
+     * Writes the raw node struct first (its pointer fields are rebuilt on
+     * load; childs serves only as a NULL/non-NULL leaf flag), then the pivot
+     * values, then either the leaf's offset into indices_ or each of the
+     * branching_ child subtrees.
+     */
+    void save_tree(FILE* stream, KMeansNodePtr node)
+    {
+        save_value(stream, *node);
+        save_value(stream, *(node->pivot), (int)veclen_);
+        if (node->childs==NULL) {
+            // Leaf: store where this node's slice starts inside indices_.
+            int indices_offset = (int)(node->indices - indices_);
+            save_value(stream, indices_offset);
+        }
+        else {
+            for(int i=0; i<branching_; ++i) {
+                save_tree(stream, node->childs[i]);
+            }
+        }
+    }
+
+
+    /**
+     * Recursively deserializes a subtree from 'stream' (inverse of save_tree).
+     *
+     * load_value restores the raw struct including the stored childs pointer,
+     * which is meaningful only as a NULL/non-NULL leaf flag; the real pointer
+     * fields are reconstructed below.
+     */
+    void load_tree(FILE* stream, KMeansNodePtr& node)
+    {
+        node = pool_.allocate<KMeansNode>();
+        load_value(stream, *node);
+        node->pivot = new DistanceType[veclen_];
+        load_value(stream, *(node->pivot), (int)veclen_);
+        if (node->childs==NULL) {
+            // Leaf: re-point the node at its slice of indices_.
+            int indices_offset;
+            load_value(stream, indices_offset);
+            node->indices = indices_ + indices_offset;
+        }
+        else {
+            node->childs = pool_.allocate<KMeansNodePtr>(branching_);
+            for(int i=0; i<branching_; ++i) {
+                load_tree(stream, node->childs[i]);
+            }
+        }
+    }
+
+
+    /**
+     * Helper function: releases the pivot arrays of an entire subtree.
+     * (The nodes themselves live in the pooled allocator and need no delete.)
+     */
+    void free_centers(KMeansNodePtr node)
+    {
+        delete[] node->pivot;
+        if (node->childs==NULL) {
+            return;  // leaf: nothing below to release
+        }
+        for (int child=0; child<branching_; ++child) {
+            free_centers(node->childs[child]);
+        }
+    }
+
+    /**
+     * Computes the statistics of a node (mean, radius, variance).
+     *
+     * Params:
+     *     node = the node to use
+     *     indices = the indices of the points belonging to the node
+     *     indices_length = number of entries in 'indices'
+     */
+    void computeNodeStatistics(KMeansNodePtr node, int* indices, int indices_length)
+    {
+
+        DistanceType radius = 0;
+        DistanceType variance = 0;
+        DistanceType* mean = new DistanceType[veclen_];
+        memoryCounter_ += int(veclen_*sizeof(DistanceType));
+
+        memset(mean,0,veclen_*sizeof(DistanceType));
+
+        // Accumulate mean and squared-distance-to-origin over the node's own
+        // points. Iterate indices_length (this node's point count), not size_
+        // (the whole dataset): the two only coincide at the root, and using
+        // size_ reads past the end of 'indices' for interior nodes.
+        for (int i=0; i<indices_length; ++i) {
+            ElementType* vec = dataset_[indices[i]];
+            for (size_t j=0; j<veclen_; ++j) {
+                mean[j] += vec[j];
+            }
+            variance += distance_(vec, ZeroIterator<ElementType>(), veclen_);
+        }
+        for (size_t j=0; j<veclen_; ++j) {
+            mean[j] /= indices_length;
+        }
+        // E[d(x,0)] - d(mean,0): variance of the node's points.
+        variance /= indices_length;
+        variance -= distance_(mean, ZeroIterator<ElementType>(), veclen_);
+
+        // Radius = distance from the mean to the furthest point of the node.
+        DistanceType tmp = 0;
+        for (int i=0; i<indices_length; ++i) {
+            tmp = distance_(mean, dataset_[indices[i]], veclen_);
+            if (tmp>radius) {
+                radius = tmp;
+            }
+        }
+
+        node->variance = variance;
+        node->radius = radius;
+        node->pivot = mean;  // ownership passes to the node; freed in free_centers()
+    }
+
+
+    /**
+     * The method responsible with actually doing the recursive hierarchical
+     * clustering
+     *
+     * Params:
+     *     node = the node to cluster
+     *     indices = indices of the points belonging to the current node
+     *     indices_length = number of entries in 'indices'
+     *     branching = the branching factor to use in the clustering
+     *     level = depth of 'node' in the tree
+     *
+     * TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)
+     */
+    void computeClustering(KMeansNodePtr node, int* indices, int indices_length, int branching, int level)
+    {
+        node->size = indices_length;
+        node->level = level;
+
+        // Too few points to split: make this node a leaf.
+        if (indices_length < branching) {
+            node->indices = indices;
+            std::sort(node->indices,node->indices+indices_length);
+            node->childs = NULL;
+            return;
+        }
+
+        // Pick the initial cluster centers (strategy chosen at construction).
+        int* centers_idx = new int[branching];
+        int centers_length;
+        (this->*chooseCenters)(branching, indices, indices_length, centers_idx, centers_length);
+
+        // Not enough distinct centers found: make this node a leaf.
+        if (centers_length<branching) {
+            node->indices = indices;
+            std::sort(node->indices,node->indices+indices_length);
+            node->childs = NULL;
+            delete [] centers_idx;
+            return;
+        }
+
+
+        // Accumulate centers in double precision to limit rounding error.
+        Matrix<double> dcenters(new double[branching*veclen_],branching,veclen_);
+        for (int i=0; i<centers_length; ++i) {
+            ElementType* vec = dataset_[centers_idx[i]];
+            for (size_t k=0; k<veclen_; ++k) {
+                dcenters[i][k] = double(vec[k]);
+            }
+        }
+        delete[] centers_idx;
+
+        std::vector<DistanceType> radiuses(branching);
+        int* count = new int[branching];
+        for (int i=0; i<branching; ++i) {
+            radiuses[i] = 0;
+            count[i] = 0;
+        }
+
+        // assign points to clusters
+        int* belongs_to = new int[indices_length];
+        for (int i=0; i<indices_length; ++i) {
+
+            DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_);
+            belongs_to[i] = 0;
+            for (int j=1; j<branching; ++j) {
+                DistanceType new_sq_dist = distance_(dataset_[indices[i]], dcenters[j], veclen_);
+                if (sq_dist>new_sq_dist) {
+                    belongs_to[i] = j;
+                    sq_dist = new_sq_dist;
+                }
+            }
+            if (sq_dist>radiuses[belongs_to[i]]) {
+                radiuses[belongs_to[i]] = sq_dist;
+            }
+            count[belongs_to[i]]++;
+        }
+
+        // Lloyd iterations: recompute centers and reassign points until
+        // stable or the iteration budget is exhausted.
+        bool converged = false;
+        int iteration = 0;
+        while (!converged && iteration<iterations_) {
+            converged = true;
+            iteration++;
+
+            // compute the new cluster centers
+            for (int i=0; i<branching; ++i) {
+                memset(dcenters[i],0,sizeof(double)*veclen_);
+                radiuses[i] = 0;
+            }
+            for (int i=0; i<indices_length; ++i) {
+                ElementType* vec = dataset_[indices[i]];
+                double* center = dcenters[belongs_to[i]];
+                for (size_t k=0; k<veclen_; ++k) {
+                    center[k] += vec[k];
+                }
+            }
+            for (int i=0; i<branching; ++i) {
+                int cnt = count[i];
+                for (size_t k=0; k<veclen_; ++k) {
+                    dcenters[i][k] /= cnt;
+                }
+            }
+
+            // reassign points to clusters
+            for (int i=0; i<indices_length; ++i) {
+                DistanceType sq_dist = distance_(dataset_[indices[i]], dcenters[0], veclen_);
+                int new_centroid = 0;
+                for (int j=1; j<branching; ++j) {
+                    DistanceType new_sq_dist = distance_(dataset_[indices[i]], dcenters[j], veclen_);
+                    if (sq_dist>new_sq_dist) {
+                        new_centroid = j;
+                        sq_dist = new_sq_dist;
+                    }
+                }
+                if (sq_dist>radiuses[new_centroid]) {
+                    radiuses[new_centroid] = sq_dist;
+                }
+                if (new_centroid != belongs_to[i]) {
+                    count[belongs_to[i]]--;
+                    count[new_centroid]++;
+                    belongs_to[i] = new_centroid;
+
+                    converged = false;
+                }
+            }
+
+            for (int i=0; i<branching; ++i) {
+                // if one cluster converges to an empty cluster,
+                // move an element into that cluster
+                if (count[i]==0) {
+                    // Find a donor cluster that has at least two points.
+                    int j = (i+1)%branching;
+                    while (count[j]<=1) {
+                        j = (j+1)%branching;
+                    }
+
+                    for (int k=0; k<indices_length; ++k) {
+                        if (belongs_to[k]==j) {
+                            // for cluster j, we move the furthest element from the center to the empty cluster i
+                            if ( distance_(dataset_[indices[k]], dcenters[j], veclen_) == radiuses[j] ) {
+                                belongs_to[k] = i;
+                                count[j]--;
+                                count[i]++;
+                                break;
+                            }
+                        }
+                    }
+                    converged = false;
+                }
+            }
+
+        }
+
+        // Convert the double-precision centers back to DistanceType; each
+        // child node below takes ownership of its center array.
+        DistanceType** centers = new DistanceType*[branching];
+
+        for (int i=0; i<branching; ++i) {
+            centers[i] = new DistanceType[veclen_];
+            memoryCounter_ += (int)(veclen_*sizeof(DistanceType));
+            for (size_t k=0; k<veclen_; ++k) {
+                centers[i][k] = (DistanceType)dcenters[i][k];
+            }
+        }
+
+
+        // compute kmeans clustering for each of the resulting clusters
+        node->childs = pool_.allocate<KMeansNodePtr>(branching);
+        int start = 0;
+        int end = start;
+        for (int c=0; c<branching; ++c) {
+            int s = count[c];
+
+            // Compute variance/mean radius of cluster c; meanwhile partition
+            // 'indices' in place so cluster c occupies [start, end).
+            DistanceType variance = 0;
+            DistanceType mean_radius =0;
+            for (int i=0; i<indices_length; ++i) {
+                if (belongs_to[i]==c) {
+                    DistanceType d = distance_(dataset_[indices[i]], ZeroIterator<ElementType>(), veclen_);
+                    variance += d;
+                    mean_radius += sqrt(d);
+                    std::swap(indices[i],indices[end]);
+                    std::swap(belongs_to[i],belongs_to[end]);
+                    end++;
+                }
+            }
+            variance /= s;
+            mean_radius /= s;
+            variance -= distance_(centers[c], ZeroIterator<ElementType>(), veclen_);
+
+            node->childs[c] = pool_.allocate<KMeansNode>();
+            std::memset(node->childs[c], 0, sizeof(KMeansNode));
+            node->childs[c]->radius = radiuses[c];
+            node->childs[c]->pivot = centers[c];
+            node->childs[c]->variance = variance;
+            node->childs[c]->mean_radius = mean_radius;
+            computeClustering(node->childs[c],indices+start, end-start, branching, level+1);
+            start=end;
+        }
+
+        delete[] dcenters.data;
+        delete[] centers;
+        delete[] count;
+        delete[] belongs_to;
+    }
+
+
+
+    /**
+     * Performs one descent in the hierarchical k-means tree. The branches not
+     * visited are stored in a priority queue.
+     *
+     * Params:
+     *     node = node to explore
+     *     result = container for the k-nearest neighbors found
+     *     vec = query points
+     *     checks = how many points in the dataset have been checked so far
+     *     maxChecks = maximum dataset points to checks
+     *     heap = priority queue receiving the branches not descended into
+     */
+
+
+    void findNN(KMeansNodePtr node, ResultSet<DistanceType>& result, const ElementType* vec, int& checks, int maxChecks,
+                Heap<BranchSt>* heap)
+    {
+        // Ignore those clusters that are too far away
+        {
+            DistanceType bsq = distance_(vec, node->pivot, veclen_);
+            DistanceType rsq = node->radius;
+            DistanceType wsq = result.worstDist();
+
+            // Pruning test in squared-distance space: skip the cluster ball
+            // when it cannot contain anything closer than the current worst.
+            DistanceType val = bsq-rsq-wsq;
+            DistanceType val2 = val*val-4*rsq*wsq;
+
+            //if (val>0) {
+            if ((val>0)&&(val2>0)) {
+                return;
+            }
+        }
+
+        if (node->childs==NULL) {
+            // Leaf: scan its points. Stop only when the check budget is spent
+            // AND the result set is already full.
+            if (checks>=maxChecks) {
+                if (result.full()) return;
+            }
+            checks += node->size;
+            for (int i=0; i<node->size; ++i) {
+                int index = node->indices[i];
+                DistanceType dist = distance_(dataset_[index], vec, veclen_);
+                result.addPoint(dist, index);
+            }
+        }
+        else {
+            // Interior node: queue the non-closest children on the heap and
+            // descend into the closest one.
+            DistanceType* domain_distances = new DistanceType[branching_];
+            int closest_center = exploreNodeBranches(node, vec, domain_distances, heap);
+            delete[] domain_distances;
+            findNN(node->childs[closest_center],result,vec, checks, maxChecks, heap);
+        }
+    }
+
+    /**
+     * Helper function that computes the nearest childs of a node to a given query point.
+     * Params:
+     *     node = the node
+     *     q = the query point
+     *     domain_distances = array with the distances to each child node.
+     * Returns: index of the closest child; every other child is pushed onto
+     *     the heap with its variance-adjusted distance.
+     */
+    int exploreNodeBranches(KMeansNodePtr node, const ElementType* q, DistanceType* domain_distances, Heap<BranchSt>* heap)
+    {
+        // First pass: distance from the query to every child pivot,
+        // remembering the closest one.
+        int closest = 0;
+        domain_distances[0] = distance_(q, node->childs[0]->pivot, veclen_);
+        for (int child=1; child<branching_; ++child) {
+            domain_distances[child] = distance_(q, node->childs[child]->pivot, veclen_);
+            if (domain_distances[child]<domain_distances[closest]) {
+                closest = child;
+            }
+        }
+
+        // Second pass: queue all non-closest children for later exploration.
+        // The distance is relaxed by cb_index_ * variance so that larger
+        // clusters are favored (see the cb_index_ field documentation).
+        for (int child=0; child<branching_; ++child) {
+            if (child != closest) {
+                domain_distances[child] -= cb_index_*node->childs[child]->variance;
+                heap->insert(BranchSt(node->childs[child],domain_distances[child]));
+            }
+        }
+
+        return closest;
+    }
+
+
+    /**
+     * Function the performs exact nearest neighbor search by traversing the entire tree.
+     */
+    void findExactNN(KMeansNodePtr node, ResultSet<DistanceType>& result, const ElementType* vec)
+    {
+        // Ignore those clusters that are too far away
+        {
+            DistanceType bsq = distance_(vec, node->pivot, veclen_);
+            DistanceType rsq = node->radius;
+            DistanceType wsq = result.worstDist();
+
+            // Same squared-distance pruning test as findNN().
+            DistanceType val = bsq-rsq-wsq;
+            DistanceType val2 = val*val-4*rsq*wsq;
+
+            // if (val>0) {
+            if ((val>0)&&(val2>0)) {
+                return;
+            }
+        }
+
+
+        if (node->childs==NULL) {
+            // Leaf: test every point.
+            for (int i=0; i<node->size; ++i) {
+                int index = node->indices[i];
+                DistanceType dist = distance_(dataset_[index], vec, veclen_);
+                result.addPoint(dist, index);
+            }
+        }
+        else {
+            // Interior: visit all children, nearest first, so the pruning test
+            // is effective in the later subtrees.
+            int* sort_indices = new int[branching_];
+
+            getCenterOrdering(node, vec, sort_indices);
+
+            for (int i=0; i<branching_; ++i) {
+                findExactNN(node->childs[sort_indices[i]],result,vec);
+            }
+
+            delete[] sort_indices;
+        }
+    }
+
+
+    /**
+     * Helper function.
+     *
+     * It computes the order in which to traverse the child nodes of a
+     * particular node: an insertion sort by increasing distance from the
+     * query point.
+     */
+    void getCenterOrdering(KMeansNodePtr node, const ElementType* q, int* sort_indices)
+    {
+        DistanceType* domain_distances = new DistanceType[branching_];
+        for (int i=0; i<branching_; ++i) {
+            DistanceType dist = distance_(q, node->childs[i]->pivot, veclen_);
+
+            // Find the insertion point. The bounds check must come first:
+            // testing domain_distances[j] before j<i reads the uninitialized
+            // slot domain_distances[i] whenever dist exceeds every distance
+            // seen so far.
+            int j=0;
+            while (j<i && domain_distances[j]<dist) j++;
+            // Shift the tail right to open a slot at position j.
+            for (int k=i; k>j; --k) {
+                domain_distances[k] = domain_distances[k-1];
+                sort_indices[k] = sort_indices[k-1];
+            }
+            domain_distances[j] = dist;
+            sort_indices[j] = i;
+        }
+        delete[] domain_distances;
+    }
+
+    /**
+     * Method that computes the squared distance from the query point q
+     * from inside region with center c to the border between this
+     * region and the region with center p
+     *
+     * Precondition: p and c must differ in at least one component, otherwise
+     * sum2 is 0 and the final division is undefined (coincident centers have
+     * no border).
+     */
+    DistanceType getDistanceToBorder(DistanceType* p, DistanceType* c, DistanceType* q)
+    {
+        DistanceType sum = 0;
+        DistanceType sum2 = 0;
+
+        // veclen_ is size_t; index with size_t to avoid the signed/unsigned
+        // comparison (and truncation for very long vectors).
+        for (size_t i=0; i<veclen_; ++i) {
+            DistanceType t = c[i]-p[i];
+            sum += t*(q[i]-(c[i]+p[i])/2);
+            sum2 += t*t;
+        }
+
+        // Squared projection of (q - midpoint) onto the center-to-center axis.
+        return sum*sum/sum2;
+    }
+
+
+    /**
+     * Helper function the descends in the hierarchical k-means tree by spliting those clusters that minimize
+     * the overall variance of the clustering.
+     * Params:
+     *     root = root node
+     *     clusters = array with clusters centers (return value)
+     *     clusters_length = capacity of 'clusters' (desired cluster count)
+     *     varianceValue = variance of the clustering (return value)
+     * Returns: number of cluster nodes actually placed into 'clusters'
+     */
+    int getMinVarianceClusters(KMeansNodePtr root, KMeansNodePtr* clusters, int clusters_length, DistanceType& varianceValue)
+    {
+        int clusterCount = 1;
+        clusters[0] = root;
+
+        // Running size-weighted variance total of the current cut.
+        DistanceType meanVariance = root->variance*root->size;
+
+        while (clusterCount<clusters_length) {
+            DistanceType minVariance = (std::numeric_limits<DistanceType>::max)();
+            int splitIndex = -1;
+
+            // Find the interior node whose split lowers total variance most.
+            for (int i=0; i<clusterCount; ++i) {
+                if (clusters[i]->childs != NULL) {
+
+                    DistanceType variance = meanVariance - clusters[i]->variance*clusters[i]->size;
+
+                    for (int j=0; j<branching_; ++j) {
+                        variance += clusters[i]->childs[j]->variance*clusters[i]->childs[j]->size;
+                    }
+                    if (variance<minVariance) {
+                        minVariance = variance;
+                        splitIndex = i;
+                    }
+                }
+            }
+
+            // Stop when only leaves remain, or when splitting would exceed
+            // the output capacity.
+            if (splitIndex==-1) break;
+            if ( (branching_+clusterCount-1) > clusters_length) break;
+
+            meanVariance = minVariance;
+
+            // split node
+            KMeansNodePtr toSplit = clusters[splitIndex];
+            clusters[splitIndex] = toSplit->childs[0];
+            for (int i=1; i<branching_; ++i) {
+                clusters[clusterCount++] = toSplit->childs[i];
+            }
+        }
+
+        varianceValue = meanVariance/root->size;
+        return clusterCount;
+    }
+
+private:
+    /** The branching factor used in the hierarchical k-means clustering */
+    int branching_;
+
+    /** Maximum number of iterations to use when performing k-means clustering */
+    int iterations_;
+
+    /** Algorithm for choosing the cluster centers */
+    flann_centers_init_t centers_init_;
+
+    /**
+     * Cluster border index. This is used in the tree search phase when determining
+     * the closest cluster to explore next. A zero value takes into account only
+     * the cluster centres, a value greater than zero also takes into account the size
+     * of the cluster.
+     */
+    float cb_index_;
+
+    /**
+     * The dataset used by this index
+     */
+    const Matrix<ElementType> dataset_;
+
+    /** Index parameters */
+    IndexParams index_params_;
+
+    /**
+     * Number of features in the dataset.
+     */
+    size_t size_;
+
+    /**
+     * Length of each feature.
+     */
+    size_t veclen_;
+
+    /**
+     * The root node in the tree.
+     */
+    KMeansNodePtr root_;
+
+    /**
+     * Array of indices to vectors in the dataset.
+     */
+    int* indices_;
+
+    /**
+     * The distance functor used for all point comparisons.
+     */
+    Distance distance_;
+
+    /**
+     * Pooled memory allocator.
+     */
+    PooledAllocator pool_;
+
+    /**
+     * Memory occupied by the index (in bytes).
+     */
+    int memoryCounter_;
+};
+
+}
+
+#endif //OPENCV_FLANN_KMEANS_INDEX_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h
new file mode 100644
index 00000000..0ea084a5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/linear_index.h
@@ -0,0 +1,139 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_LINEAR_INDEX_H_
+#define OPENCV_FLANN_LINEAR_INDEX_H_
+
+#include "general.h"
+#include "nn_index.h"
+
+namespace cvflann
+{
+
+/** Parameter set selecting the brute-force (linear scan) index. */
+struct LinearIndexParams : public IndexParams
+{
+    LinearIndexParams()
+    {
+        (* this)["algorithm"] = FLANN_INDEX_LINEAR;
+    }
+};
+
+/**
+ * Brute-force nearest-neighbor index: builds no structure, every search
+ * scans the entire dataset.
+ */
+template <typename Distance>
+class LinearIndex : public NNIndex<Distance>
+{
+public:
+
+    typedef typename Distance::ElementType ElementType;
+    typedef typename Distance::ResultType DistanceType;
+
+
+    /** Keeps a copy of the matrix header; presumably the underlying feature
+     *  data is shared, not owned — confirm against Matrix's copy semantics. */
+    LinearIndex(const Matrix<ElementType>& inputData, const IndexParams& params = LinearIndexParams(),
+                Distance d = Distance()) :
+        dataset_(inputData), index_params_(params), distance_(d)
+    {
+    }
+
+    /* Declared but not defined: the index is non-copyable. */
+    LinearIndex(const LinearIndex&);
+    LinearIndex& operator=(const LinearIndex&);
+
+    flann_algorithm_t getType() const
+    {
+        return FLANN_INDEX_LINEAR;
+    }
+
+
+    /** Number of points in the dataset. */
+    size_t size() const
+    {
+        return dataset_.rows;
+    }
+
+    /** Length of each dataset point. */
+    size_t veclen() const
+    {
+        return dataset_.cols;
+    }
+
+
+    /** A linear index stores nothing beyond the dataset reference. */
+    int usedMemory() const
+    {
+        return 0;
+    }
+
+    /**
+     * Dummy implementation for other algorithms of addable indexes after that.
+     */
+    void addIndex(const Matrix<ElementType>& /*wholeData*/, const Matrix<ElementType>& /*additionalData*/)
+    {
+    }
+
+    void buildIndex()
+    {
+        /* nothing to do here for linear search */
+    }
+
+    void saveIndex(FILE*)
+    {
+        /* nothing to do here for linear search */
+    }
+
+
+    void loadIndex(FILE*)
+    {
+        /* nothing to do here for linear search */
+
+        index_params_["algorithm"] = getType();
+    }
+
+    /** Exhaustive scan: feeds the distance of every dataset row to the
+     *  result set, which keeps whatever it was configured to keep. */
+    void findNeighbors(ResultSet<DistanceType>& resultSet, const ElementType* vec, const SearchParams& /*searchParams*/)
+    {
+        ElementType* data = dataset_.data;
+        for (size_t i = 0; i < dataset_.rows; ++i, data += dataset_.cols) {
+            DistanceType dist = distance_(data, vec, dataset_.cols);
+            resultSet.addPoint(dist, (int)i);
+        }
+    }
+
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
+
+private:
+    /** The dataset */
+    const Matrix<ElementType> dataset_;
+    /** Index parameters */
+    IndexParams index_params_;
+    /** Index distance */
+    Distance distance_;
+
+};
+
+}
+
+#endif // OPENCV_FLANN_LINEAR_INDEX_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h
new file mode 100644
index 00000000..24f3fb69
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/logger.h
@@ -0,0 +1,130 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_LOGGER_H
+#define OPENCV_FLANN_LOGGER_H
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "defines.h"
+
+
+namespace cvflann
+{
+
+/**
+ * Singleton logger writing to stdout or to a user-selected file.
+ *
+ * NOTE(review): not thread-safe — concurrent calls share 'stream' and
+ * 'logLevel' without synchronization; confirm single-threaded use.
+ */
+class Logger
+{
+    Logger() : stream(stdout), logLevel(FLANN_LOG_WARN) {}
+
+    ~Logger()
+    {
+        if ((stream!=NULL)&&(stream!=stdout)) {
+            fclose(stream);
+        }
+    }
+
+    /** Meyers singleton: constructed on first use. */
+    static Logger& instance()
+    {
+        static Logger logger;
+        return logger;
+    }
+
+    void _setDestination(const char* name)
+    {
+        // Close any previously opened log file before switching; overwriting
+        // 'stream' without closing it leaked the old FILE handle.
+        if ((stream!=NULL)&&(stream!=stdout)) {
+            fclose(stream);
+        }
+        if (name==NULL) {
+            stream = stdout;
+        }
+        else {
+            stream = fopen(name,"w");
+            if (stream == NULL) {
+                // Fall back to console output if the file cannot be opened.
+                stream = stdout;
+            }
+        }
+    }
+
+    /** Writes the message if 'level' passes the filter; returns vfprintf's
+     *  result, or -1 when the message is filtered out. */
+    int _log(int level, const char* fmt, va_list arglist)
+    {
+        if (level > logLevel ) return -1;
+        int ret = vfprintf(stream, fmt, arglist);
+        return ret;
+    }
+
+public:
+    /**
+     * Sets the logging level. All messages with lower priority will be ignored.
+     * @param level Logging level
+     */
+    static void setLevel(int level) { instance().logLevel = level; }
+
+    /**
+     * Sets the logging destination
+     * @param name Filename or NULL for console
+     */
+    static void setDestination(const char* name) { instance()._setDestination(name); }
+
+    /**
+     * Print log message
+     * @param level Log level
+     * @param fmt Message format
+     * @return vfprintf result, or -1 if the message was filtered out
+     */
+    static int log(int level, const char* fmt, ...)
+    {
+        va_list arglist;
+        va_start(arglist, fmt);
+        int ret = instance()._log(level,fmt,arglist);
+        va_end(arglist);
+        return ret;
+    }
+
+#define LOG_METHOD(NAME,LEVEL) \
+    static int NAME(const char* fmt, ...) \
+    { \
+        va_list ap; \
+        va_start(ap, fmt); \
+        int ret = instance()._log(LEVEL, fmt, ap); \
+        va_end(ap); \
+        return ret; \
+    }
+
+    /* Convenience per-level wrappers: Logger::fatal/error/warn/info. */
+    LOG_METHOD(fatal, FLANN_LOG_FATAL)
+    LOG_METHOD(error, FLANN_LOG_ERROR)
+    LOG_METHOD(warn, FLANN_LOG_WARN)
+    LOG_METHOD(info, FLANN_LOG_INFO)
+
+private:
+    FILE* stream;
+    int logLevel;
+};
+
+}
+
+#endif //OPENCV_FLANN_LOGGER_H
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h
new file mode 100644
index 00000000..2b89337d
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_index.h
@@ -0,0 +1,420 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+/***********************************************************************
+ * Author: Vincent Rabaud
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_LSH_INDEX_H_
+#define OPENCV_FLANN_LSH_INDEX_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <map>
+#include <vector>
+
+#include "general.h"
+#include "nn_index.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "heap.h"
+#include "lsh_table.h"
+#include "allocator.h"
+#include "random.h"
+#include "saving.h"
+
+namespace cvflann
+{
+
+/** Parameter set for the multi-probe LSH index. */
+struct LshIndexParams : public IndexParams
+{
+    LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2)
+    {
+        (* this)["algorithm"] = FLANN_INDEX_LSH;
+        // The number of hash tables to use
+        (*this)["table_number"] = table_number;
+        // The length of the key in the hash tables
+        (*this)["key_size"] = key_size;
+        // Number of levels to use in multi-probe (0 for standard LSH)
+        (*this)["multi_probe_level"] = multi_probe_level;
+    }
+};
+
+/**
+ * Randomized kd-tree index
+ *
+ * Contains the k-d trees and other information for indexing a set of points
+ * for nearest-neighbor matching.
+ */
+template<typename Distance>
+class LshIndex : public NNIndex<Distance>
+{
+public:
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+    /** Constructor
+     * @param input_data dataset with the input features
+     * @param params parameters passed to the LSH algorithm
+     * @param d the distance used
+     */
+    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(),
+             Distance d = Distance()) :
+        dataset_(input_data), index_params_(params), distance_(d)
+    {
+        // cv::flann::IndexParams sets integer params as 'int', so it is used with get_param
+        // in place of 'unsigned int'
+        table_number_ = (unsigned int)get_param<int>(index_params_,"table_number",12);
+        key_size_ = (unsigned int)get_param<int>(index_params_,"key_size",20);
+        multi_probe_level_ = (unsigned int)get_param<int>(index_params_,"multi_probe_level",2);
+
+        // Precompute the multi-probe bit-flip masks once: they depend only on
+        // the key size and the probe level, not on the data.
+        feature_size_ = (unsigned)dataset_.cols;
+        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
+    }
+
+
+    /* Declared but not defined: the index is non-copyable. */
+    LshIndex(const LshIndex&);
+    LshIndex& operator=(const LshIndex&);
+
+    /**
+     * Implementation for the LSH addable indexes after that.
+     * Hashes only the new rows into each table (offset by the pre-existing
+     * row count) and then re-points the index at the combined dataset.
+     * @param wholeData whole dataset with the input features
+     * @param additionalData additional dataset with the input features
+     */
+    void addIndex(const Matrix<ElementType>& wholeData, const Matrix<ElementType>& additionalData)
+    {
+        tables_.resize(table_number_);
+        for (unsigned int i = 0; i < table_number_; ++i) {
+            lsh::LshTable<ElementType>& table = tables_[i];
+            // Add the features to the table with indexed offset
+            table.add((int)(wholeData.rows - additionalData.rows), additionalData);
+        }
+        dataset_ = wholeData;
+    }
+
+    /**
+     * Builds the index: creates table_number_ hash tables, each keyed on a
+     * randomly shuffled subset of the feature's bit positions, and inserts
+     * the whole dataset into every table.
+     */
+    void buildIndex()
+    {
+        std::vector<size_t> indices(feature_size_ * CHAR_BIT);
+
+        tables_.resize(table_number_);
+        for (unsigned int i = 0; i < table_number_; ++i) {
+
+            //re-initialize the random indices table that the LshTable will use to pick its sub-dimensions
+            if( (indices.size() == feature_size_ * CHAR_BIT) || (indices.size() < key_size_) )
+            {
+                indices.resize( feature_size_ * CHAR_BIT );
+                for (size_t j = 0; j < feature_size_ * CHAR_BIT; ++j)
+                    indices[j] = j;
+                std::random_shuffle(indices.begin(), indices.end());
+            }
+
+            lsh::LshTable<ElementType>& table = tables_[i];
+            table = lsh::LshTable<ElementType>(feature_size_, key_size_, indices);
+
+            // Add the features to the table with offset 0
+            table.add(0, dataset_);
+        }
+    }
+
+    /** Identifies this index type as LSH. */
+    flann_algorithm_t getType() const
+    {
+        return FLANN_INDEX_LSH;
+    }
+
+
+    /** Serializes the LSH parameters and the dataset; the hash tables are
+     *  cheap to rebuild and are therefore not stored (see loadIndex). */
+    void saveIndex(FILE* stream)
+    {
+        save_value(stream,table_number_);
+        save_value(stream,key_size_);
+        save_value(stream,multi_probe_level_);
+        save_value(stream, dataset_);
+    }
+
+    /** Restores parameters and dataset written by saveIndex, then rebuilds
+     *  the hash tables from scratch. */
+    void loadIndex(FILE* stream)
+    {
+        load_value(stream, table_number_);
+        load_value(stream, key_size_);
+        load_value(stream, multi_probe_level_);
+        load_value(stream, dataset_);
+        // Building the index is so fast we can afford not storing it
+        buildIndex();
+
+        // Mirror the restored values back into the parameter map.
+        index_params_["algorithm"] = getType();
+        index_params_["table_number"] = table_number_;
+        index_params_["key_size"] = key_size_;
+        index_params_["multi_probe_level"] = multi_probe_level_;
+    }
+
+    /**
+     * Returns size of index (number of dataset rows).
+     */
+    size_t size() const
+    {
+        return dataset_.rows;
+    }
+
+    /**
+     * Returns the length of an index feature.
+     */
+    size_t veclen() const
+    {
+        return feature_size_;
+    }
+
+    /**
+     * Computes the index memory usage
+     * Returns: memory used by the index
+     */
+    int usedMemory() const
+    {
+        return (int)(dataset_.rows * sizeof(int));
+    }
+
+
+    /** Returns the parameters this index was constructed with. */
+    IndexParams getParameters() const
+    {
+        return index_params_;
+    }
+
+    /**
+     * \brief Perform k-nearest neighbor search
+     * \param[in] queries The query points for which to find the nearest neighbors
+     * \param[out] indices The indices of the nearest neighbors found
+     * \param[out] dists Distances to the nearest neighbors found
+     * \param[in] knn Number of nearest neighbors to return
+     * \param[in] params Search parameters
+     */
+    virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+    {
+        assert(queries.cols == veclen());
+        assert(indices.rows >= queries.rows);
+        assert(dists.rows >= queries.rows);
+        assert(int(indices.cols) >= knn);
+        assert(int(dists.cols) >= knn);
+
+
+        KNNUniqueResultSet<DistanceType> resultSet(knn);
+        for (size_t i = 0; i < queries.rows; i++) {
+            resultSet.clear();
+            // Pre-fill the output row so unfilled slots read as "no neighbor".
+            std::fill_n(indices[i], knn, -1);
+            std::fill_n(dists[i], knn, std::numeric_limits<DistanceType>::max());
+            findNeighbors(resultSet, queries[i], params);
+            if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn);
+            else resultSet.copy(indices[i], dists[i], knn);
+        }
+    }
+
+
+    /**
+     * Find set of nearest neighbors to vec. Their indices are stored inside
+     * the result object.
+     *
+     * Params:
+     *     result = the result object in which the indices of the nearest-neighbors are stored
+     *     vec = the vector for which to search the nearest neighbors
+     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)
+     */
+    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/)
+    {
+        // NOTE(review): dispatches to a getNeighbors(vec, result) overload not
+        // visible in this chunk (the private getNeighbors below has a
+        // different signature) — confirm against the full header.
+        getNeighbors(vec, result);
+    }
+
+private:
+    /** Defines the comparator on score and index
+     */
+    typedef std::pair<float, unsigned int> ScoreIndexPair;
+    /** Orders pairs by their second member (the feature index), as the name
+     *  says — not by score. */
+    struct SortScoreIndexPairOnSecond
+    {
+        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
+        {
+            return left.second < right.second;
+        }
+    };
+
+    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
+     * @param key the key we build neighbors from
+     * @param lowest_index the lowest index of the bit set
+     * @param level the multi-probe level we are at
+     * @param xor_masks all the xor mask
+     */
+    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level,
+                       std::vector<lsh::BucketKey>& xor_masks)
+    {
+        // Emit the key itself, then recursively emit every variant with up to
+        // 'level' additional bits set at positions below 'lowest_index'.
+        xor_masks.push_back(key);
+        if (level == 0) return;
+        for (int index = lowest_index - 1; index >= 0; --index) {
+            // Create a new key
+            lsh::BucketKey new_key = key | (1 << index);
+            fill_xor_mask(new_key, index, level - 1, xor_masks);
+        }
+    }
+
+ /** Performs the approximate nearest-neighbor search.
+ * @param vec the feature to analyze
+ * @param do_radius flag indicating if we check the radius too
+ * @param radius the radius if it is a radius search
+ * @param do_k flag indicating if we limit the number of nn
+ * @param k_nn the number of nearest neighbors
+ * @param checked_average used for debugging
+ */
+ void getNeighbors(const ElementType* vec, bool /*do_radius*/, float radius, bool do_k, unsigned int k_nn,
+ float& /*checked_average*/)
+ {
+ static std::vector<ScoreIndexPair> score_index_heap;
+
+ if (do_k) {
+ unsigned int worst_score = std::numeric_limits<unsigned int>::max();
+ typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
+ typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
+ for (; table != table_end; ++table) {
+ size_t key = table->getKey(vec);
+ std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
+ std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
+ for (; xor_mask != xor_mask_end; ++xor_mask) {
+ size_t sub_key = key ^ (*xor_mask);
+ const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
+ if (bucket == 0) continue;
+
+ // Go over each descriptor index
+ std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
+ std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
+ DistanceType hamming_distance;
+
+ // Process the rest of the candidates
+ for (; training_index < last_training_index; ++training_index) {
+ hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols);
+
+ if (hamming_distance < worst_score) {
+ // Insert the new element
+ score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index));
+ std::push_heap(score_index_heap.begin(), score_index_heap.end());
+
+ if (score_index_heap.size() > (unsigned int)k_nn) {
+ // Remove the highest distance value as we have too many elements
+ std::pop_heap(score_index_heap.begin(), score_index_heap.end());
+ score_index_heap.pop_back();
+ // Keep track of the worst score
+ worst_score = score_index_heap.front().first;
+ }
+ }
+ }
+ }
+ }
+ }
+ else {
+ typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
+ typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
+ for (; table != table_end; ++table) {
+ size_t key = table->getKey(vec);
+ std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
+ std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
+ for (; xor_mask != xor_mask_end; ++xor_mask) {
+ size_t sub_key = key ^ (*xor_mask);
+ const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
+ if (bucket == 0) continue;
+
+ // Go over each descriptor index
+ std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
+ std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
+ DistanceType hamming_distance;
+
+ // Process the rest of the candidates
+ for (; training_index < last_training_index; ++training_index) {
+ // Compute the Hamming distance
+ hamming_distance = distance_(vec, dataset_[*training_index], dataset_.cols);
+ if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, training_index));
+ }
+ }
+ }
+ }
+ }
+
+ /** Performs the approximate nearest-neighbor search.
+ * This is a slower version than the above as it uses the ResultSet
+ * @param vec the feature to analyze
+ */
+ void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result)
+ {
+ typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
+ typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
+ for (; table != table_end; ++table) {
+ size_t key = table->getKey(vec);
+ std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
+ std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
+ for (; xor_mask != xor_mask_end; ++xor_mask) {
+ size_t sub_key = key ^ (*xor_mask);
+ const lsh::Bucket* bucket = table->getBucketFromKey((lsh::BucketKey)sub_key);
+ if (bucket == 0) continue;
+
+ // Go over each descriptor index
+ std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
+ std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
+ DistanceType hamming_distance;
+
+ // Process the rest of the candidates
+ for (; training_index < last_training_index; ++training_index) {
+ // Compute the Hamming distance
+ hamming_distance = distance_(vec, dataset_[*training_index], (int)dataset_.cols);
+ result.addPoint(hamming_distance, *training_index);
+ }
+ }
+ }
+ }
+
+ /** The different hash tables */
+ std::vector<lsh::LshTable<ElementType> > tables_;
+
+ /** The data the LSH tables were built from */
+ Matrix<ElementType> dataset_;
+
+ /** The size of the features (as ElementType[]) */
+ unsigned int feature_size_;
+
+ IndexParams index_params_;
+
+ /** table number */
+ unsigned int table_number_;
+ /** key size */
+ unsigned int key_size_;
+ /** How far should we look for neighbors in multi-probe LSH */
+ unsigned int multi_probe_level_;
+
+ /** The XOR masks to apply to a key to get the neighboring buckets */
+ std::vector<lsh::BucketKey> xor_masks_;
+
+ Distance distance_;
+};
+}
+
+#endif //OPENCV_FLANN_LSH_INDEX_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h
new file mode 100644
index 00000000..cef01b2e
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/lsh_table.h
@@ -0,0 +1,497 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+/***********************************************************************
+ * Author: Vincent Rabaud
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_LSH_TABLE_H_
+#define OPENCV_FLANN_LSH_TABLE_H_
+
+#include <algorithm>
+#include <iostream>
+#include <iomanip>
+#include <limits.h>
+// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP
+#ifdef __GXX_EXPERIMENTAL_CXX0X__
+# define USE_UNORDERED_MAP 1
+#else
+# define USE_UNORDERED_MAP 0
+#endif
+#if USE_UNORDERED_MAP
+#include <unordered_map>
+#else
+#include <map>
+#endif
+#include <math.h>
+#include <stddef.h>
+
+#include "dynamic_bitset.h"
+#include "matrix.h"
+
+namespace cvflann
+{
+
+namespace lsh
+{
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** What is stored in an LSH bucket
+ */
+typedef uint32_t FeatureIndex;
+/** The id from which we can get a bucket back in an LSH table
+ */
+typedef unsigned int BucketKey;
+
+/** A bucket in an LSH table
+ */
+typedef std::vector<FeatureIndex> Bucket;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** POD for stats about an LSH table
+ */
+struct LshStats
+{
+ std::vector<unsigned int> bucket_sizes_;
+ size_t n_buckets_;
+ size_t bucket_size_mean_;
+ size_t bucket_size_median_;
+ size_t bucket_size_min_;
+ size_t bucket_size_max_;
+ size_t bucket_size_std_dev;
+ /** Each contained vector contains three values: beginning/end of the interval, and the number of elements in the bin
+ */
+ std::vector<std::vector<unsigned int> > size_histogram_;
+};
+
+/** Overload the << operator for LshStats
+ * @param out the stream to write to
+ * @param stats the stats to display
+ * @return the stream
+ */
+inline std::ostream& operator <<(std::ostream& out, const LshStats& stats)
+{
+ int w = 20;
+ out << "Lsh Table Stats:\n" << std::setw(w) << std::setiosflags(std::ios::right) << "N buckets : "
+ << stats.n_buckets_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "mean size : "
+ << std::setiosflags(std::ios::left) << stats.bucket_size_mean_ << "\n" << std::setw(w)
+ << std::setiosflags(std::ios::right) << "median size : " << stats.bucket_size_median_ << "\n" << std::setw(w)
+ << std::setiosflags(std::ios::right) << "min size : " << std::setiosflags(std::ios::left)
+ << stats.bucket_size_min_ << "\n" << std::setw(w) << std::setiosflags(std::ios::right) << "max size : "
+ << std::setiosflags(std::ios::left) << stats.bucket_size_max_;
+
+ // Display the histogram
+ out << std::endl << std::setw(w) << std::setiosflags(std::ios::right) << "histogram : "
+ << std::setiosflags(std::ios::left);
+ for (std::vector<std::vector<unsigned int> >::const_iterator iterator = stats.size_histogram_.begin(), end =
+ stats.size_histogram_.end(); iterator != end; ++iterator) out << (*iterator)[0] << "-" << (*iterator)[1] << ": " << (*iterator)[2] << ", ";
+
+ return out;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** Lsh hash table. As its key is a sub-feature, and as usually
+ * the size of it is pretty small, we keep it as a continuous memory array.
+ * The value is an index in the corpus of features (we keep it as an unsigned
+ * int for pure memory reasons, it could be a size_t)
+ */
+template<typename ElementType>
+class LshTable
+{
+public:
+ /** A container of all the feature indices. Optimized for space
+ */
+#if USE_UNORDERED_MAP
+ typedef std::unordered_map<BucketKey, Bucket> BucketsSpace;
+#else
+ typedef std::map<BucketKey, Bucket> BucketsSpace;
+#endif
+
+ /** A container of all the feature indices. Optimized for speed
+ */
+ typedef std::vector<Bucket> BucketsSpeed;
+
+ /** Default constructor
+ */
+ LshTable()
+ {
+ }
+
+ /** Constructor
+ * Create the mask and allocate the memory
+ * @param feature_size is the size of the feature (considered as a ElementType[])
+ * @param key_size is the number of bits that are turned on in the feature
+ * @param indices
+ */
+ LshTable(unsigned int feature_size, unsigned int key_size, std::vector<size_t> & indices)
+ {
+ (void)feature_size;
+ (void)key_size;
+ (void)indices;
+ std::cerr << "LSH is not implemented for that type" << std::endl;
+ assert(0);
+ }
+
+ /** Add a feature to the table
+ * @param value the value to store for that feature
+ * @param feature the feature itself
+ */
+ void add(unsigned int value, const ElementType* feature)
+ {
+ // Add the value to the corresponding bucket
+ BucketKey key = (lsh::BucketKey)getKey(feature);
+
+ switch (speed_level_) {
+ case kArray:
+ // That means we get the buckets from an array
+ buckets_speed_[key].push_back(value);
+ break;
+ case kBitsetHash:
+ // That means we can check the bitset for the presence of a key
+ key_bitset_.set(key);
+ buckets_space_[key].push_back(value);
+ break;
+ case kHash:
+ {
+ // That means we have to check for the hash table for the presence of a key
+ buckets_space_[key].push_back(value);
+ break;
+ }
+ }
+ }
+
+ /** Add a set of features to the table
+ * @param indexed_ofst previous indexed offset
+ * @param dataset the values to store
+ */
+ void add(int indexed_ofst, Matrix<ElementType> dataset)
+ {
+#if USE_UNORDERED_MAP
+ buckets_space_.rehash((buckets_space_.size() + dataset.rows) * 1.2);
+#endif
+ // Add the features to the table
+ for (unsigned int i = 0; i < dataset.rows; ++i) add(i + indexed_ofst, dataset[i]);
+ // Now that the table is full, optimize it for speed/space
+ optimize();
+ }
+
+ /** Get a bucket given the key
+ * @param key the key identifying the bucket
+ * @return a pointer to the bucket, or 0 if no bucket exists for that key
+ */
+ inline const Bucket* getBucketFromKey(BucketKey key) const
+ {
+ // Generate other buckets
+ switch (speed_level_) {
+ case kArray:
+ // That means we get the buckets from an array
+ return &buckets_speed_[key];
+ break;
+ case kBitsetHash:
+ // That means we can check the bitset for the presence of a key
+ if (key_bitset_.test(key)) return &buckets_space_.find(key)->second;
+ else return 0;
+ break;
+ case kHash:
+ {
+ // That means we have to check for the hash table for the presence of a key
+ BucketsSpace::const_iterator bucket_it, bucket_end = buckets_space_.end();
+ bucket_it = buckets_space_.find(key);
+ // Stop here if that bucket does not exist
+ if (bucket_it == bucket_end) return 0;
+ else return &bucket_it->second;
+ break;
+ }
+ }
+ return 0;
+ }
+
+ /** Compute the sub-signature of a feature
+ */
+ size_t getKey(const ElementType* /*feature*/) const
+ {
+ std::cerr << "LSH is not implemented for that type" << std::endl;
+ assert(0);
+ return 1;
+ }
+
+ /** Get statistics about the table
+ * @return statistics about the number and sizes of the buckets
+ */
+ LshStats getStats() const;
+
+private:
+ /** defines the speed of the implementation
+ * kArray uses a vector for storing data
+ * kBitsetHash uses a hash map but checks for the validity of a key with a bitset
+ * kHash uses a hash map only
+ */
+ enum SpeedLevel
+ {
+ kArray, kBitsetHash, kHash
+ };
+
+ /** Initialize some variables
+ */
+ void initialize(size_t key_size)
+ {
+ const size_t key_size_lower_bound = 1;
+ //a value (size_t(1) << key_size) must fit the size_t type so key_size has to be strictly less than size of size_t
+ const size_t key_size_upper_bound = std::min(sizeof(BucketKey) * CHAR_BIT + 1, sizeof(size_t) * CHAR_BIT);
+ if (key_size < key_size_lower_bound || key_size >= key_size_upper_bound)
+ {
+ std::stringstream errorMessage;
+ errorMessage << "Invalid key_size (=" << key_size << "). Valid values for your system are " << key_size_lower_bound << " <= key_size < " << key_size_upper_bound << ".";
+ CV_Error(CV_StsBadArg, errorMessage.str());
+ }
+
+ speed_level_ = kHash;
+ key_size_ = (unsigned)key_size;
+ }
+
+ /** Optimize the table for speed/space
+ */
+ void optimize()
+ {
+ // If we are already using the fast storage, no need to do anything
+ if (speed_level_ == kArray) return;
+
+ // Use an array if it will be more than half full
+ if (buckets_space_.size() > ((size_t(1) << key_size_) / 2)) {
+ speed_level_ = kArray;
+ // Fill the array version of it
+ buckets_speed_.resize(size_t(1) << key_size_);
+ for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) buckets_speed_[key_bucket->first] = key_bucket->second;
+
+ // Empty the hash table
+ buckets_space_.clear();
+ return;
+ }
+
+ // If the bitset is going to use less than 10% of the RAM of the hash map (at least 1 size_t for the key and two
+ // for the vector) or less than 512MB (key_size_ <= 32, matching the check below)
+ if (((std::max(buckets_space_.size(), buckets_speed_.size()) * CHAR_BIT * 3 * sizeof(BucketKey)) / 10
+ >= (size_t(1) << key_size_)) || (key_size_ <= 32)) {
+ speed_level_ = kBitsetHash;
+ key_bitset_.resize(size_t(1) << key_size_);
+ key_bitset_.reset();
+ // Try with the BucketsSpace
+ for (BucketsSpace::const_iterator key_bucket = buckets_space_.begin(); key_bucket != buckets_space_.end(); ++key_bucket) key_bitset_.set(key_bucket->first);
+ }
+ else {
+ speed_level_ = kHash;
+ key_bitset_.clear();
+ }
+ }
+
+ /** The vector of all the buckets if they are held for speed
+ */
+ BucketsSpeed buckets_speed_;
+
+ /** The hash table of all the buckets in case we cannot use the speed version
+ */
+ BucketsSpace buckets_space_;
+
+ /** What is used to store the data */
+ SpeedLevel speed_level_;
+
+ /** If the subkey is small enough, it will keep track of which subkeys are set through that bitset
+ * That is just a speedup so that we don't look in the hash table (which can be much slower than checking a bitset)
+ */
+ DynamicBitset key_bitset_;
+
+ /** The size of the sub-signature in bits
+ */
+ unsigned int key_size_;
+
+ // Members only used for the unsigned char specialization
+ /** The mask to apply to a feature to get the hash key
+ * Only used in the unsigned char case
+ */
+ std::vector<size_t> mask_;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Specialization for unsigned char
+
+template<>
+inline LshTable<unsigned char>::LshTable( unsigned int feature_size,
+ unsigned int subsignature_size,
+ std::vector<size_t> & indices )
+{
+ initialize(subsignature_size);
+ // Allocate the mask
+ mask_ = std::vector<size_t>((size_t)ceil((float)(feature_size * sizeof(char)) / (float)sizeof(size_t)), 0);
+
+ // Generate a random set of order of subsignature_size_ bits
+ for (unsigned int i = 0; i < key_size_; ++i) {
+ //Ensure the Nth bit will be selected only once among the different LshTables
+ //to avoid having two different tables with signatures sharing many dimensions/many bits
+ size_t index = indices[0];
+ indices.erase( indices.begin() );
+
+ // Set that bit in the mask
+ size_t divisor = CHAR_BIT * sizeof(size_t);
+ size_t idx = index / divisor; //pick the right size_t index
+ mask_[idx] |= size_t(1) << (index % divisor); //use modulo to find the bit offset
+ }
+
+ // Set to 1 if you want to display the mask for debug
+#if 0
+ {
+ size_t bcount = 0;
+ BOOST_FOREACH(size_t mask_block, mask_){
+ out << std::setw(sizeof(size_t) * CHAR_BIT / 4) << std::setfill('0') << std::hex << mask_block
+ << std::endl;
+ bcount += __builtin_popcountll(mask_block);
+ }
+ out << "bit count : " << std::dec << bcount << std::endl;
+ out << "mask size : " << mask_.size() << std::endl;
+ return out;
+ }
+#endif
+}
+
+/** Return the Subsignature of a feature
+ * @param feature the feature to analyze
+ */
+template<>
+inline size_t LshTable<unsigned char>::getKey(const unsigned char* feature) const
+{
+ // no need to check if T is dividable by sizeof(size_t) like in the Hamming
+ // distance computation as we have a mask
+ const size_t* feature_block_ptr = reinterpret_cast<const size_t*> ((const void*)feature);
+
+ // Figure out the subsignature of the feature
+ // Given the feature ABCDEF, and the mask 001011, the output will be
+ // 000CEF
+ size_t subsignature = 0;
+ size_t bit_index = 1;
+
+ for (std::vector<size_t>::const_iterator pmask_block = mask_.begin(); pmask_block != mask_.end(); ++pmask_block) {
+ // get the mask and signature blocks
+ size_t feature_block = *feature_block_ptr;
+ size_t mask_block = *pmask_block;
+ while (mask_block) {
+ // Get the lowest set bit in the mask block
+ size_t lowest_bit = mask_block & (-(ptrdiff_t)mask_block);
+ // Add it to the current subsignature if necessary
+ subsignature += (feature_block & lowest_bit) ? bit_index : 0;
+ // Reset the bit in the mask block
+ mask_block ^= lowest_bit;
+ // increment the bit index for the subsignature
+ bit_index <<= 1;
+ }
+ // Check the next feature block
+ ++feature_block_ptr;
+ }
+ return subsignature;
+}
+
+template<>
+inline LshStats LshTable<unsigned char>::getStats() const
+{
+ LshStats stats;
+ stats.bucket_size_mean_ = 0;
+ if ((buckets_speed_.empty()) && (buckets_space_.empty())) {
+ stats.n_buckets_ = 0;
+ stats.bucket_size_median_ = 0;
+ stats.bucket_size_min_ = 0;
+ stats.bucket_size_max_ = 0;
+ return stats;
+ }
+
+ if (!buckets_speed_.empty()) {
+ for (BucketsSpeed::const_iterator pbucket = buckets_speed_.begin(); pbucket != buckets_speed_.end(); ++pbucket) {
+ stats.bucket_sizes_.push_back((lsh::FeatureIndex)pbucket->size());
+ stats.bucket_size_mean_ += pbucket->size();
+ }
+ stats.bucket_size_mean_ /= buckets_speed_.size();
+ stats.n_buckets_ = buckets_speed_.size();
+ }
+ else {
+ for (BucketsSpace::const_iterator x = buckets_space_.begin(); x != buckets_space_.end(); ++x) {
+ stats.bucket_sizes_.push_back((lsh::FeatureIndex)x->second.size());
+ stats.bucket_size_mean_ += x->second.size();
+ }
+ stats.bucket_size_mean_ /= buckets_space_.size();
+ stats.n_buckets_ = buckets_space_.size();
+ }
+
+ std::sort(stats.bucket_sizes_.begin(), stats.bucket_sizes_.end());
+
+ // BOOST_FOREACH(int size, stats.bucket_sizes_)
+ // std::cout << size << " ";
+ // std::cout << std::endl;
+ stats.bucket_size_median_ = stats.bucket_sizes_[stats.bucket_sizes_.size() / 2];
+ stats.bucket_size_min_ = stats.bucket_sizes_.front();
+ stats.bucket_size_max_ = stats.bucket_sizes_.back();
+
+ // TODO compute mean and std
+ /*float mean, stddev;
+ stats.bucket_size_mean_ = mean;
+ stats.bucket_size_std_dev = stddev;*/
+
+ // Include a histogram of the buckets
+ unsigned int bin_start = 0;
+ unsigned int bin_end = 20;
+ bool is_new_bin = true;
+ for (std::vector<unsigned int>::iterator iterator = stats.bucket_sizes_.begin(), end = stats.bucket_sizes_.end(); iterator
+ != end; )
+ if (*iterator < bin_end) {
+ if (is_new_bin) {
+ stats.size_histogram_.push_back(std::vector<unsigned int>(3, 0));
+ stats.size_histogram_.back()[0] = bin_start;
+ stats.size_histogram_.back()[1] = bin_end - 1;
+ is_new_bin = false;
+ }
+ ++stats.size_histogram_.back()[2];
+ ++iterator;
+ }
+ else {
+ bin_start += 20;
+ bin_end += 20;
+ is_new_bin = true;
+ }
+
+ return stats;
+}
+
+// End the two namespaces
+}
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif /* OPENCV_FLANN_LSH_TABLE_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h
new file mode 100644
index 00000000..51b6c635
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/matrix.h
@@ -0,0 +1,116 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_DATASET_H_
+#define OPENCV_FLANN_DATASET_H_
+
+#include <stdio.h>
+
+#include "general.h"
+
+namespace cvflann
+{
+
+/**
+ * Class that implements a simple rectangular matrix stored in a memory buffer and
+ * provides convenient matrix-like access using the [] operators.
+ */
+template <typename T>
+class Matrix
+{
+public:
+ typedef T type;
+
+ size_t rows;
+ size_t cols;
+ size_t stride;
+ T* data;
+
+ Matrix() : rows(0), cols(0), stride(0), data(NULL)
+ {
+ }
+
+ Matrix(T* data_, size_t rows_, size_t cols_, size_t stride_ = 0) :
+ rows(rows_), cols(cols_), stride(stride_), data(data_)
+ {
+ if (stride==0) stride = cols;
+ }
+
+ /**
+ * Convenience function for deallocating the storage data.
+ */
+ FLANN_DEPRECATED void free()
+ {
+ fprintf(stderr, "The cvflann::Matrix<T>::free() method is deprecated "
+ "and it does not do any memory deallocation any more. You are"
+ "responsible for deallocating the matrix memory (by doing"
+ "'delete[] matrix.data' for example)");
+ }
+
+ /**
+ * Operator that return a (pointer to a) row of the data.
+ */
+ T* operator[](size_t index) const
+ {
+ return data+index*stride;
+ }
+};
+
+
+class UntypedMatrix
+{
+public:
+ size_t rows;
+ size_t cols;
+ void* data;
+ flann_datatype_t type;
+
+ UntypedMatrix(void* data_, long rows_, long cols_) :
+ rows(rows_), cols(cols_), data(data_)
+ {
+ }
+
+ ~UntypedMatrix()
+ {
+ }
+
+
+ template<typename T>
+ Matrix<T> as()
+ {
+ return Matrix<T>((T*)data, rows, cols);
+ }
+};
+
+
+
+}
+
+#endif //OPENCV_FLANN_DATASET_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp
new file mode 100644
index 00000000..121f8d05
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/miniflann.hpp
@@ -0,0 +1,163 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef _OPENCV_MINIFLANN_HPP_
+#define _OPENCV_MINIFLANN_HPP_
+
+#ifdef __cplusplus
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/flann/defines.h"
+
+namespace cv
+{
+
+namespace flann
+{
+
+struct CV_EXPORTS IndexParams
+{
+ IndexParams();
+ ~IndexParams();
+
+ std::string getString(const std::string& key, const std::string& defaultVal=std::string()) const;
+ int getInt(const std::string& key, int defaultVal=-1) const;
+ double getDouble(const std::string& key, double defaultVal=-1) const;
+
+ void setString(const std::string& key, const std::string& value);
+ void setInt(const std::string& key, int value);
+ void setDouble(const std::string& key, double value);
+ void setFloat(const std::string& key, float value);
+ void setBool(const std::string& key, bool value);
+ void setAlgorithm(int value);
+
+ void getAll(std::vector<std::string>& names,
+ std::vector<int>& types,
+ std::vector<std::string>& strValues,
+ std::vector<double>& numValues) const;
+
+ void* params;
+};
+
+struct CV_EXPORTS KDTreeIndexParams : public IndexParams
+{
+ KDTreeIndexParams(int trees=4);
+};
+
+struct CV_EXPORTS LinearIndexParams : public IndexParams
+{
+ LinearIndexParams();
+};
+
+struct CV_EXPORTS CompositeIndexParams : public IndexParams
+{
+ CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11,
+ cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f );
+};
+
+struct CV_EXPORTS AutotunedIndexParams : public IndexParams
+{
+ AutotunedIndexParams(float target_precision = 0.8f, float build_weight = 0.01f,
+ float memory_weight = 0, float sample_fraction = 0.1f);
+};
+
+struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams
+{
+ HierarchicalClusteringIndexParams(int branching = 32,
+ cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, int trees = 4, int leaf_size = 100 );
+};
+
+struct CV_EXPORTS KMeansIndexParams : public IndexParams
+{
+ KMeansIndexParams(int branching = 32, int iterations = 11,
+ cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f );
+};
+
+struct CV_EXPORTS LshIndexParams : public IndexParams
+{
+ LshIndexParams(int table_number, int key_size, int multi_probe_level);
+};
+
+struct CV_EXPORTS SavedIndexParams : public IndexParams
+{
+ SavedIndexParams(const std::string& filename);
+};
+
+struct CV_EXPORTS SearchParams : public IndexParams
+{
+ SearchParams( int checks = 32, float eps = 0, bool sorted = true );
+};
+
+class CV_EXPORTS_W Index
+{
+public:
+ CV_WRAP Index();
+ CV_WRAP Index(InputArray features, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2);
+ virtual ~Index();
+
+ CV_WRAP virtual void build(InputArray wholefeatures, InputArray additionalfeatures, const IndexParams& params, cvflann::flann_distance_t distType=cvflann::FLANN_DIST_L2);
+
+ CV_WRAP virtual void knnSearch(InputArray query, OutputArray indices,
+ OutputArray dists, int knn, const SearchParams& params=SearchParams());
+
+ CV_WRAP virtual int radiusSearch(InputArray query, OutputArray indices,
+ OutputArray dists, double radius, int maxResults,
+ const SearchParams& params=SearchParams());
+
+ CV_WRAP virtual void save(const std::string& filename) const;
+ CV_WRAP virtual bool load(InputArray features, const std::string& filename);
+ CV_WRAP virtual void release();
+ CV_WRAP cvflann::flann_distance_t getDistance() const;
+ CV_WRAP cvflann::flann_algorithm_t getAlgorithm() const;
+
+protected:
+ cvflann::flann_distance_t distType;
+ cvflann::flann_algorithm_t algo;
+ int featureType;
+ void* index;
+};
+
+} } // namespace cv::flann
+
+#endif // __cplusplus
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h
new file mode 100644
index 00000000..4a874f58
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/nn_index.h
@@ -0,0 +1,184 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_NNINDEX_H
+#define OPENCV_FLANN_NNINDEX_H
+
+#include <string>
+
+#include "general.h"
+#include "matrix.h"
+#include "result_set.h"
+#include "params.h"
+
+namespace cvflann
+{
+
+/**
+ * Nearest-neighbour index base class
+ */
+template <typename Distance>
+class NNIndex
+{
+ typedef typename Distance::ElementType ElementType;
+ typedef typename Distance::ResultType DistanceType;
+
+public:
+
+ virtual ~NNIndex() {}
+
+ /**
+ * \brief Builds the index
+ */
+ virtual void buildIndex() = 0;
+
+ /**
+ * \brief Adds points to the index (implemented by algorithms that support addable indexes).
+ */
+ virtual void addIndex(const Matrix<ElementType>& wholeData, const Matrix<ElementType>& additionalData) = 0;
+
+ /**
+ * \brief Perform k-nearest neighbor search
+ * \param[in] queries The query points for which to find the nearest neighbors
+ * \param[out] indices The indices of the nearest neighbors found
+ * \param[out] dists Distances to the nearest neighbors found
+ * \param[in] knn Number of nearest neighbors to return
+ * \param[in] params Search parameters
+ */
+ virtual void knnSearch(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, int knn, const SearchParams& params)
+ {
+ assert(queries.cols == veclen());
+ assert(indices.rows >= queries.rows);
+ assert(dists.rows >= queries.rows);
+ assert(int(indices.cols) >= knn);
+ assert(int(dists.cols) >= knn);
+
+#if 0
+ KNNResultSet<DistanceType> resultSet(knn);
+ for (size_t i = 0; i < queries.rows; i++) {
+ resultSet.init(indices[i], dists[i]);
+ findNeighbors(resultSet, queries[i], params);
+ }
+#else
+ KNNUniqueResultSet<DistanceType> resultSet(knn);
+ for (size_t i = 0; i < queries.rows; i++) {
+ resultSet.clear();
+ findNeighbors(resultSet, queries[i], params);
+ if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices[i], dists[i], knn);
+ else resultSet.copy(indices[i], dists[i], knn);
+ }
+#endif
+ }
+
+ /**
+ * \brief Perform radius search
+ * \param[in] query The query point
+ * \param[out] indices The indices of the neighbors found within the given radius
+ * \param[out] dists The distances to the nearest neighbors found
+ * \param[in] radius The radius used for search
+ * \param[in] params Search parameters
+ * \returns Number of neighbors found
+ */
+ virtual int radiusSearch(const Matrix<ElementType>& query, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)
+ {
+ if (query.rows != 1) {
+ fprintf(stderr, "I can only search one feature at a time for range search\n");
+ return -1;
+ }
+ assert(query.cols == veclen());
+ assert(indices.cols == dists.cols);
+
+ int n = 0;
+ int* indices_ptr = NULL;
+ DistanceType* dists_ptr = NULL;
+ if (indices.cols > 0) {
+ n = (int)indices.cols;
+ indices_ptr = indices[0];
+ dists_ptr = dists[0];
+ }
+
+ RadiusUniqueResultSet<DistanceType> resultSet((DistanceType)radius);
+ resultSet.clear();
+ findNeighbors(resultSet, query[0], params);
+ if (n>0) {
+ if (get_param(params,"sorted",true)) resultSet.sortAndCopy(indices_ptr, dists_ptr, n);
+ else resultSet.copy(indices_ptr, dists_ptr, n);
+ }
+
+ return (int)resultSet.size();
+ }
+
+ /**
+ * \brief Saves the index to a stream
+ * \param stream The stream to save the index to
+ */
+ virtual void saveIndex(FILE* stream) = 0;
+
+ /**
+ * \brief Loads the index from a stream
+ * \param stream The stream from which the index is loaded
+ */
+ virtual void loadIndex(FILE* stream) = 0;
+
+ /**
+ * \returns number of features in this index.
+ */
+ virtual size_t size() const = 0;
+
+ /**
+ * \returns The dimensionality of the features in this index.
+ */
+ virtual size_t veclen() const = 0;
+
+ /**
+ * \returns The amount of memory (in bytes) used by the index.
+ */
+ virtual int usedMemory() const = 0;
+
+ /**
+ * \returns The index type (kdtree, kmeans,...)
+ */
+ virtual flann_algorithm_t getType() const = 0;
+
+ /**
+ * \returns The index parameters
+ */
+ virtual IndexParams getParameters() const = 0;
+
+
+ /**
+ * \brief Method that searches for nearest-neighbours
+ */
+ virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) = 0;
+};
+
+}
+
+#endif //OPENCV_FLANN_NNINDEX_H
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h
new file mode 100644
index 00000000..7f971c5a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/object_factory.h
@@ -0,0 +1,91 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_OBJECT_FACTORY_H_
+#define OPENCV_FLANN_OBJECT_FACTORY_H_
+
+#include <map>
+
+namespace cvflann
+{
+
+class CreatorNotFound
+{
+};
+
+template<typename BaseClass,
+ typename UniqueIdType,
+ typename ObjectCreator = BaseClass* (*)()>
+class ObjectFactory
+{
+ typedef ObjectFactory<BaseClass,UniqueIdType,ObjectCreator> ThisClass;
+ typedef std::map<UniqueIdType, ObjectCreator> ObjectRegistry;
+
+ // singleton class, private constructor
+ ObjectFactory() {}
+
+public:
+
+ bool subscribe(UniqueIdType id, ObjectCreator creator)
+ {
+ if (object_registry.find(id) != object_registry.end()) return false;
+
+ object_registry[id] = creator;
+ return true;
+ }
+
+ bool unregister(UniqueIdType id)
+ {
+ return object_registry.erase(id) == 1;
+ }
+
+ ObjectCreator create(UniqueIdType id)
+ {
+ typename ObjectRegistry::const_iterator iter = object_registry.find(id);
+
+ if (iter == object_registry.end()) {
+ throw CreatorNotFound();
+ }
+
+ return iter->second;
+ }
+
+ static ThisClass& instance()
+ {
+ static ThisClass the_factory;
+ return the_factory;
+ }
+private:
+ ObjectRegistry object_registry;
+};
+
+}
+
+#endif /* OPENCV_FLANN_OBJECT_FACTORY_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h
new file mode 100644
index 00000000..b40c39e3
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/params.h
@@ -0,0 +1,99 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2011 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2011 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_PARAMS_H_
+#define OPENCV_FLANN_PARAMS_H_
+
+#include "any.h"
+#include "general.h"
+#include <iostream>
+#include <map>
+
+
+namespace cvflann
+{
+
+typedef std::map<std::string, any> IndexParams;
+
+struct SearchParams : public IndexParams
+{
+ SearchParams(int checks = 32, float eps = 0, bool sorted = true )
+ {
+ // how many leafs to visit when searching for neighbours (-1 for unlimited)
+ (*this)["checks"] = checks;
+ // search for eps-approximate neighbours (default: 0)
+ (*this)["eps"] = eps;
+ // only for radius search, require neighbours sorted by distance (default: true)
+ (*this)["sorted"] = sorted;
+ }
+};
+
+
+template<typename T>
+T get_param(const IndexParams& params, std::string name, const T& default_value)
+{
+ IndexParams::const_iterator it = params.find(name);
+ if (it != params.end()) {
+ return it->second.cast<T>();
+ }
+ else {
+ return default_value;
+ }
+}
+
+template<typename T>
+T get_param(const IndexParams& params, std::string name)
+{
+ IndexParams::const_iterator it = params.find(name);
+ if (it != params.end()) {
+ return it->second.cast<T>();
+ }
+ else {
+ throw FLANNException(std::string("Missing parameter '")+name+std::string("' in the parameters given"));
+ }
+}
+
+inline void print_params(const IndexParams& params, std::ostream& stream)
+{
+ IndexParams::const_iterator it;
+
+ for(it=params.begin(); it!=params.end(); ++it) {
+ stream << it->first << " : " << it->second << std::endl;
+ }
+}
+
+inline void print_params(const IndexParams& params)
+{
+ print_params(params, std::cout);
+}
+
+}
+
+
+#endif /* OPENCV_FLANN_PARAMS_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h
new file mode 100644
index 00000000..a3cf5ec5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/random.h
@@ -0,0 +1,133 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_RANDOM_H
+#define OPENCV_FLANN_RANDOM_H
+
+#include <algorithm>
+#include <cstdlib>
+#include <vector>
+
+#include "general.h"
+
+namespace cvflann
+{
+
+/**
+ * Seeds the random number generator
+ * @param seed Random seed
+ */
+inline void seed_random(unsigned int seed)
+{
+ srand(seed);
+}
+
+/*
+ * Generates a random double value.
+ */
+/**
+ * Generates a random double value.
+ * @param high Upper limit
+ * @param low Lower limit
+ * @return Random double value
+ */
+inline double rand_double(double high = 1.0, double low = 0)
+{
+ return low + ((high-low) * (std::rand() / (RAND_MAX + 1.0)));
+}
+
+/**
+ * Generates a random integer value.
+ * @param high Upper limit
+ * @param low Lower limit
+ * @return Random integer value
+ */
+inline int rand_int(int high = RAND_MAX, int low = 0)
+{
+ return low + (int) ( double(high-low) * (std::rand() / (RAND_MAX + 1.0)));
+}
+
+/**
+ * Random number generator that returns a distinct number from
+ * the [0,n) interval each time.
+ */
+class UniqueRandom
+{
+ std::vector<int> vals_;
+ int size_;
+ int counter_;
+
+public:
+ /**
+ * Constructor.
+ * @param n Size of the interval from which to generate
+ * @return
+ */
+ UniqueRandom(int n)
+ {
+ init(n);
+ }
+
+ /**
+ * Initializes the number generator.
+ * @param n the size of the interval from which to generate random numbers.
+ */
+ void init(int n)
+ {
+ // create and initialize an array of size n
+ vals_.resize(n);
+ size_ = n;
+ for (int i = 0; i < size_; ++i) vals_[i] = i;
+
+ // shuffle the elements in the array
+ std::random_shuffle(vals_.begin(), vals_.end());
+
+ counter_ = 0;
+ }
+
+ /**
+ * Return a distinct random integer greater than or equal to 0 and less
+ * than 'n' on each call. It should be called maximum 'n' times.
+ * Returns: a random integer
+ */
+ int next()
+ {
+ if (counter_ == size_) {
+ return -1;
+ }
+ else {
+ return vals_[counter_++];
+ }
+ }
+};
+
+}
+
+#endif //OPENCV_FLANN_RANDOM_H
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h
new file mode 100644
index 00000000..97500195
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/result_set.h
@@ -0,0 +1,543 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_RESULTSET_H
+#define OPENCV_FLANN_RESULTSET_H
+
+#include <algorithm>
+#include <cstring>
+#include <iostream>
+#include <limits>
+#include <set>
+#include <vector>
+
+namespace cvflann
+{
+
+/* This record represents a branch point when finding neighbors in
+ the tree. It contains a record of the minimum distance to the query
+ point, as well as the node at which the search resumes.
+ */
+
+template <typename T, typename DistanceType>
+struct BranchStruct
+{
+ T node; /* Tree node at which search resumes */
+ DistanceType mindist; /* Minimum distance to query for all nodes below. */
+
+ BranchStruct() {}
+ BranchStruct(const T& aNode, DistanceType dist) : node(aNode), mindist(dist) {}
+
+ bool operator<(const BranchStruct<T, DistanceType>& rhs) const
+ {
+ return mindist<rhs.mindist;
+ }
+};
+
+
+template <typename DistanceType>
+class ResultSet
+{
+public:
+ virtual ~ResultSet() {}
+
+ virtual bool full() const = 0;
+
+ virtual void addPoint(DistanceType dist, int index) = 0;
+
+ virtual DistanceType worstDist() const = 0;
+
+};
+
+/**
+ * KNNSimpleResultSet does not ensure that the element it holds are unique.
+ * Is used in those cases where the nearest neighbour algorithm used does not
+ * attempt to insert the same element multiple times.
+ */
+template <typename DistanceType>
+class KNNSimpleResultSet : public ResultSet<DistanceType>
+{
+ int* indices;
+ DistanceType* dists;
+ int capacity;
+ int count;
+ DistanceType worst_distance_;
+
+public:
+ KNNSimpleResultSet(int capacity_) : capacity(capacity_), count(0)
+ {
+ }
+
+ void init(int* indices_, DistanceType* dists_)
+ {
+ indices = indices_;
+ dists = dists_;
+ count = 0;
+ worst_distance_ = (std::numeric_limits<DistanceType>::max)();
+ dists[capacity-1] = worst_distance_;
+ }
+
+ size_t size() const
+ {
+ return count;
+ }
+
+ bool full() const
+ {
+ return count == capacity;
+ }
+
+
+ void addPoint(DistanceType dist, int index)
+ {
+ if (dist >= worst_distance_) return;
+ int i;
+ for (i=count; i>0; --i) {
+#ifdef FLANN_FIRST_MATCH
+ if ( (dists[i-1]>dist) || ((dist==dists[i-1])&&(indices[i-1]>index)) )
+#else
+ if (dists[i-1]>dist)
+#endif
+ {
+ if (i<capacity) {
+ dists[i] = dists[i-1];
+ indices[i] = indices[i-1];
+ }
+ }
+ else break;
+ }
+ if (count < capacity) ++count;
+ dists[i] = dist;
+ indices[i] = index;
+ worst_distance_ = dists[capacity-1];
+ }
+
+ DistanceType worstDist() const
+ {
+ return worst_distance_;
+ }
+};
+
+/**
+ * K-Nearest neighbour result set. Ensures that the elements inserted are unique
+ */
+template <typename DistanceType>
+class KNNResultSet : public ResultSet<DistanceType>
+{
+ int* indices;
+ DistanceType* dists;
+ int capacity;
+ int count;
+ DistanceType worst_distance_;
+
+public:
+ KNNResultSet(int capacity_) : capacity(capacity_), count(0)
+ {
+ }
+
+ void init(int* indices_, DistanceType* dists_)
+ {
+ indices = indices_;
+ dists = dists_;
+ count = 0;
+ worst_distance_ = (std::numeric_limits<DistanceType>::max)();
+ dists[capacity-1] = worst_distance_;
+ }
+
+ size_t size() const
+ {
+ return count;
+ }
+
+ bool full() const
+ {
+ return count == capacity;
+ }
+
+
+ void addPoint(DistanceType dist, int index)
+ {
+ if (dist >= worst_distance_) return;
+ int i;
+ for (i = count; i > 0; --i) {
+#ifdef FLANN_FIRST_MATCH
+ if ( (dists[i-1]<=dist) && ((dist!=dists[i-1])||(indices[i-1]<=index)) )
+#else
+ if (dists[i-1]<=dist)
+#endif
+ {
+ // Check for duplicate indices
+ int j = i - 1;
+ while ((j >= 0) && (dists[j] == dist)) {
+ if (indices[j] == index) {
+ return;
+ }
+ --j;
+ }
+ break;
+ }
+ }
+
+ if (count < capacity) ++count;
+ for (int j = count-1; j > i; --j) {
+ dists[j] = dists[j-1];
+ indices[j] = indices[j-1];
+ }
+ dists[i] = dist;
+ indices[i] = index;
+ worst_distance_ = dists[capacity-1];
+ }
+
+ DistanceType worstDist() const
+ {
+ return worst_distance_;
+ }
+};
+
+
+/**
+ * A result-set class used when performing a radius based search.
+ */
+template <typename DistanceType>
+class RadiusResultSet : public ResultSet<DistanceType>
+{
+ DistanceType radius;
+ int* indices;
+ DistanceType* dists;
+ size_t capacity;
+ size_t count;
+
+public:
+ RadiusResultSet(DistanceType radius_, int* indices_, DistanceType* dists_, int capacity_) :
+ radius(radius_), indices(indices_), dists(dists_), capacity(capacity_)
+ {
+ init();
+ }
+
+ ~RadiusResultSet()
+ {
+ }
+
+ void init()
+ {
+ count = 0;
+ }
+
+ size_t size() const
+ {
+ return count;
+ }
+
+ bool full() const
+ {
+ return true;
+ }
+
+ void addPoint(DistanceType dist, int index)
+ {
+ if (dist<radius) {
+ if ((capacity>0)&&(count < capacity)) {
+ dists[count] = dist;
+ indices[count] = index;
+ }
+ count++;
+ }
+ }
+
+ DistanceType worstDist() const
+ {
+ return radius;
+ }
+
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** Class that holds the k NN neighbors
+ * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays
+ */
+template<typename DistanceType>
+class UniqueResultSet : public ResultSet<DistanceType>
+{
+public:
+ struct DistIndex
+ {
+ DistIndex(DistanceType dist, unsigned int index) :
+ dist_(dist), index_(index)
+ {
+ }
+ bool operator<(const DistIndex dist_index) const
+ {
+ return (dist_ < dist_index.dist_) || ((dist_ == dist_index.dist_) && index_ < dist_index.index_);
+ }
+ DistanceType dist_;
+ unsigned int index_;
+ };
+
+ /** Default constructor */
+ UniqueResultSet() :
+ worst_distance_(std::numeric_limits<DistanceType>::max())
+ {
+ }
+
+ /** Check the status of the set
+ * @return true if we have k NN
+ */
+ inline bool full() const
+ {
+ return is_full_;
+ }
+
+ /** Remove all elements in the set
+ */
+ virtual void clear() = 0;
+
+ /** Copy the set to two C arrays
+ * @param indices pointer to a C array of indices
+ * @param dist pointer to a C array of distances
+ * @param n_neighbors the number of neighbors to copy
+ */
+ virtual void copy(int* indices, DistanceType* dist, int n_neighbors = -1) const
+ {
+ if (n_neighbors < 0) {
+ for (typename std::set<DistIndex>::const_iterator dist_index = dist_indices_.begin(), dist_index_end =
+ dist_indices_.end(); dist_index != dist_index_end; ++dist_index, ++indices, ++dist) {
+ *indices = dist_index->index_;
+ *dist = dist_index->dist_;
+ }
+ }
+ else {
+ int i = 0;
+ for (typename std::set<DistIndex>::const_iterator dist_index = dist_indices_.begin(), dist_index_end =
+ dist_indices_.end(); (dist_index != dist_index_end) && (i < n_neighbors); ++dist_index, ++indices, ++dist, ++i) {
+ *indices = dist_index->index_;
+ *dist = dist_index->dist_;
+ }
+ }
+ }
+
+ /** Copy the set to two C arrays but sort it according to the distance first
+ * @param indices pointer to a C array of indices
+ * @param dist pointer to a C array of distances
+ * @param n_neighbors the number of neighbors to copy
+ */
+ virtual void sortAndCopy(int* indices, DistanceType* dist, int n_neighbors = -1) const
+ {
+ copy(indices, dist, n_neighbors);
+ }
+
+ /** The number of neighbors in the set
+ * @return
+ */
+ size_t size() const
+ {
+ return dist_indices_.size();
+ }
+
+ /** The distance of the furthest neighbor
+ * If we don't have enough neighbors, it returns the max possible value
+ * @return
+ */
+ inline DistanceType worstDist() const
+ {
+ return worst_distance_;
+ }
+protected:
+ /** Flag to say if the set is full */
+ bool is_full_;
+
+ /** The worst distance found so far */
+ DistanceType worst_distance_;
+
+ /** The best candidates so far */
+ std::set<DistIndex> dist_indices_;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** Class that holds the k NN neighbors
+ * Faster than KNNResultSet as it uses a binary heap and does not maintain two arrays
+ */
+template<typename DistanceType>
+class KNNUniqueResultSet : public UniqueResultSet<DistanceType>
+{
+public:
+ /** Constructor
+ * @param capacity the number of neighbors to store at max
+ */
+ KNNUniqueResultSet(unsigned int capacity) : capacity_(capacity)
+ {
+ this->is_full_ = false;
+ this->clear();
+ }
+
+ /** Add a possible candidate to the best neighbors
+ * @param dist distance for that neighbor
+ * @param index index of that neighbor
+ */
+ inline void addPoint(DistanceType dist, int index)
+ {
+ // Don't do anything if we are worse than the worst
+ if (dist >= worst_distance_) return;
+ dist_indices_.insert(DistIndex(dist, index));
+
+ if (is_full_) {
+ if (dist_indices_.size() > capacity_) {
+ dist_indices_.erase(*dist_indices_.rbegin());
+ worst_distance_ = dist_indices_.rbegin()->dist_;
+ }
+ }
+ else if (dist_indices_.size() == capacity_) {
+ is_full_ = true;
+ worst_distance_ = dist_indices_.rbegin()->dist_;
+ }
+ }
+
+ /** Remove all elements in the set
+ */
+ void clear()
+ {
+ dist_indices_.clear();
+ worst_distance_ = std::numeric_limits<DistanceType>::max();
+ is_full_ = false;
+ }
+
+protected:
+ typedef typename UniqueResultSet<DistanceType>::DistIndex DistIndex;
+ using UniqueResultSet<DistanceType>::is_full_;
+ using UniqueResultSet<DistanceType>::worst_distance_;
+ using UniqueResultSet<DistanceType>::dist_indices_;
+
+ /** The number of neighbors to keep */
+ unsigned int capacity_;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** Class that holds the radius nearest neighbors
+ * It is more accurate than RadiusResultSet as it is not limited in the number of neighbors
+ */
+template<typename DistanceType>
+class RadiusUniqueResultSet : public UniqueResultSet<DistanceType>
+{
+public:
+ /** Constructor
+ * @param radius the maximum distance of a neighbor
+ */
+ RadiusUniqueResultSet(DistanceType radius) :
+ radius_(radius)
+ {
+ is_full_ = true;
+ }
+
+ /** Add a possible candidate to the best neighbors
+ * @param dist distance for that neighbor
+ * @param index index of that neighbor
+ */
+ void addPoint(DistanceType dist, int index)
+ {
+ if (dist <= radius_) dist_indices_.insert(DistIndex(dist, index));
+ }
+
+ /** Remove all elements in the set
+ */
+ inline void clear()
+ {
+ dist_indices_.clear();
+ }
+
+
+ /** Check the status of the set
+ * @return always true
+ */
+ inline bool full() const
+ {
+ return true;
+ }
+
+ /** The distance of the furthest neighbor
+ * If we don't have enough neighbors, it returns the max possible value
+ * @return
+ */
+ inline DistanceType worstDist() const
+ {
+ return radius_;
+ }
+private:
+ typedef typename UniqueResultSet<DistanceType>::DistIndex DistIndex;
+ using UniqueResultSet<DistanceType>::dist_indices_;
+ using UniqueResultSet<DistanceType>::is_full_;
+
+ /** The furthest distance a neighbor can be */
+ DistanceType radius_;
+};
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/** Class that holds the k NN neighbors within a radius distance
+ */
+template<typename DistanceType>
+class KNNRadiusUniqueResultSet : public KNNUniqueResultSet<DistanceType>
+{
+public:
+ /** Constructor
+ * @param capacity the number of neighbors to store at max
+ * @param radius the maximum distance of a neighbor
+ */
+ KNNRadiusUniqueResultSet(unsigned int capacity, DistanceType radius)
+ {
+ this->capacity_ = capacity;
+ this->radius_ = radius;
+ this->dist_indices_.reserve(capacity_);
+ this->clear();
+ }
+
+ /** Remove all elements in the set
+ */
+ void clear()
+ {
+ dist_indices_.clear();
+ worst_distance_ = radius_;
+ is_full_ = false;
+ }
+private:
+ using KNNUniqueResultSet<DistanceType>::dist_indices_;
+ using KNNUniqueResultSet<DistanceType>::is_full_;
+ using KNNUniqueResultSet<DistanceType>::worst_distance_;
+
+ /** The maximum number of neighbors to consider */
+ unsigned int capacity_;
+
+ /** The maximum distance of a neighbor */
+ DistanceType radius_;
+};
+}
+
+#endif //OPENCV_FLANN_RESULTSET_H
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h
new file mode 100644
index 00000000..396f177a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/sampling.h
@@ -0,0 +1,81 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+
+#ifndef OPENCV_FLANN_SAMPLING_H_
+#define OPENCV_FLANN_SAMPLING_H_
+
+#include "matrix.h"
+#include "random.h"
+
+namespace cvflann
+{
+
+template<typename T>
+Matrix<T> random_sample(Matrix<T>& srcMatrix, long size, bool remove = false)
+{
+ Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);
+
+ T* src,* dest;
+ for (long i=0; i<size; ++i) {
+ long r = rand_int((int)(srcMatrix.rows-i));
+ dest = newSet[i];
+ src = srcMatrix[r];
+ std::copy(src, src+srcMatrix.cols, dest);
+ if (remove) {
+ src = srcMatrix[srcMatrix.rows-i-1];
+ dest = srcMatrix[r];
+ std::copy(src, src+srcMatrix.cols, dest);
+ }
+ }
+ if (remove) {
+ srcMatrix.rows -= size;
+ }
+ return newSet;
+}
+
+template<typename T>
+Matrix<T> random_sample(const Matrix<T>& srcMatrix, size_t size)
+{
+ UniqueRandom rand((int)srcMatrix.rows);
+ Matrix<T> newSet(new T[size * srcMatrix.cols], size,srcMatrix.cols);
+
+ T* src,* dest;
+ for (size_t i=0; i<size; ++i) {
+ long r = rand.next();
+ dest = newSet[i];
+ src = srcMatrix[r];
+ std::copy(src, src+srcMatrix.cols, dest);
+ }
+ return newSet;
+}
+
+} // namespace
+
+
+#endif /* OPENCV_FLANN_SAMPLING_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/saving.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/saving.h
new file mode 100644
index 00000000..7e3bea56
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/saving.h
@@ -0,0 +1,187 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_SAVING_H_
+#define OPENCV_FLANN_SAVING_H_
+
+#include <cstring>
+#include <vector>
+
+#include "general.h"
+#include "nn_index.h"
+
+#ifdef FLANN_SIGNATURE_
+#undef FLANN_SIGNATURE_
+#endif
+#define FLANN_SIGNATURE_ "FLANN_INDEX"
+
+namespace cvflann
+{
+
+template <typename T>
+struct Datatype {};
+template<>
+struct Datatype<char> { static flann_datatype_t type() { return FLANN_INT8; } };
+template<>
+struct Datatype<short> { static flann_datatype_t type() { return FLANN_INT16; } };
+template<>
+struct Datatype<int> { static flann_datatype_t type() { return FLANN_INT32; } };
+template<>
+struct Datatype<unsigned char> { static flann_datatype_t type() { return FLANN_UINT8; } };
+template<>
+struct Datatype<unsigned short> { static flann_datatype_t type() { return FLANN_UINT16; } };
+template<>
+struct Datatype<unsigned int> { static flann_datatype_t type() { return FLANN_UINT32; } };
+template<>
+struct Datatype<float> { static flann_datatype_t type() { return FLANN_FLOAT32; } };
+template<>
+struct Datatype<double> { static flann_datatype_t type() { return FLANN_FLOAT64; } };
+
+
+/**
+ * Structure representing the index header.
+ */
+struct IndexHeader
+{
+ char signature[16];
+ char version[16];
+ flann_datatype_t data_type;
+ flann_algorithm_t index_type;
+ size_t rows;
+ size_t cols;
+};
+
+/**
+ * Saves index header to stream
+ *
+ * @param stream - Stream to save to
+ * @param index - The index to save
+ */
+template<typename Distance>
+void save_header(FILE* stream, const NNIndex<Distance>& index)
+{
+ IndexHeader header;
+ memset(header.signature, 0, sizeof(header.signature));
+ strcpy(header.signature, FLANN_SIGNATURE_);
+ memset(header.version, 0, sizeof(header.version));
+ strcpy(header.version, FLANN_VERSION_);
+ header.data_type = Datatype<typename Distance::ElementType>::type();
+ header.index_type = index.getType();
+ header.rows = index.size();
+ header.cols = index.veclen();
+
+ std::fwrite(&header, sizeof(header),1,stream);
+}
+
+
+/**
+ *
+ * @param stream - Stream to load from
+ * @return Index header
+ */
+inline IndexHeader load_header(FILE* stream)
+{
+ IndexHeader header;
+ size_t read_size = fread(&header,sizeof(header),1,stream);
+
+ if (read_size!=(size_t)1) {
+ throw FLANNException("Invalid index file, cannot read");
+ }
+
+ if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) {
+ throw FLANNException("Invalid index file, wrong signature");
+ }
+
+ return header;
+
+}
+
+
+template<typename T>
+void save_value(FILE* stream, const T& value, size_t count = 1)
+{
+ fwrite(&value, sizeof(value),count, stream);
+}
+
+template<typename T>
+void save_value(FILE* stream, const cvflann::Matrix<T>& value)
+{
+ fwrite(&value, sizeof(value),1, stream);
+ fwrite(value.data, sizeof(T),value.rows*value.cols, stream);
+}
+
+template<typename T>
+void save_value(FILE* stream, const std::vector<T>& value)
+{
+ size_t size = value.size();
+ fwrite(&size, sizeof(size_t), 1, stream);
+ fwrite(&value[0], sizeof(T), size, stream);
+}
+
+template<typename T>
+void load_value(FILE* stream, T& value, size_t count = 1)
+{
+ size_t read_cnt = fread(&value, sizeof(value), count, stream);
+ if (read_cnt != count) {
+ throw FLANNException("Cannot read from file");
+ }
+}
+
+template<typename T>
+void load_value(FILE* stream, cvflann::Matrix<T>& value)
+{
+ size_t read_cnt = fread(&value, sizeof(value), 1, stream);
+ if (read_cnt != 1) {
+ throw FLANNException("Cannot read from file");
+ }
+ value.data = new T[value.rows*value.cols];
+ read_cnt = fread(value.data, sizeof(T), value.rows*value.cols, stream);
+ if (read_cnt != (size_t)(value.rows*value.cols)) {
+ throw FLANNException("Cannot read from file");
+ }
+}
+
+
+template<typename T>
+void load_value(FILE* stream, std::vector<T>& value)
+{
+ size_t size;
+ size_t read_cnt = fread(&size, sizeof(size_t), 1, stream);
+ if (read_cnt!=1) {
+ throw FLANNException("Cannot read from file");
+ }
+ value.resize(size);
+ read_cnt = fread(&value[0], sizeof(T), size, stream);
+ if (read_cnt != size) {
+ throw FLANNException("Cannot read from file");
+ }
+}
+
+}
+
+#endif /* OPENCV_FLANN_SAVING_H_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h
new file mode 100644
index 00000000..145901ab
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/simplex_downhill.h
@@ -0,0 +1,186 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_SIMPLEX_DOWNHILL_H_
+#define OPENCV_FLANN_SIMPLEX_DOWNHILL_H_
+
+namespace cvflann
+{
+
+/**
+ Adds val to array vals (and point to array points) and keeping the arrays sorted by vals.
+ */
+template <typename T>
+void addValue(int pos, float val, float* vals, T* point, T* points, int n)
+{
+ vals[pos] = val;
+ for (int i=0; i<n; ++i) {
+ points[pos*n+i] = point[i];
+ }
+
+ // bubble down
+ int j=pos;
+ while (j>0 && vals[j]<vals[j-1]) {
+ swap(vals[j],vals[j-1]);
+ for (int i=0; i<n; ++i) {
+ swap(points[j*n+i],points[(j-1)*n+i]);
+ }
+ --j;
+ }
+}
+
+
+/**
+ Simplex downhill optimization function.
+ Preconditions: points is a 2D matrix of size (n+1) x n
+ func is the cost function taking n an array of n params and returning float
+ vals is the cost function in the n+1 simplex points, if NULL it will be computed
+
+ Postcondition: returns optimum value and points[0..n] are the optimum parameters
+ */
+template <typename T, typename F>
+float optimizeSimplexDownhill(T* points, int n, F func, float* vals = NULL )
+{
+ const int MAX_ITERATIONS = 10;
+
+ assert(n>0);
+
+ T* p_o = new T[n];
+ T* p_r = new T[n];
+ T* p_e = new T[n];
+
+ int alpha = 1;
+
+ int iterations = 0;
+
+ bool ownVals = false;
+ if (vals == NULL) {
+ ownVals = true;
+ vals = new float[n+1];
+ for (int i=0; i<n+1; ++i) {
+ float val = func(points+i*n);
+ addValue(i, val, vals, points+i*n, points, n);
+ }
+ }
+ int nn = n*n;
+
+ while (true) {
+
+ if (iterations++ > MAX_ITERATIONS) break;
+
+ // compute average of simplex points (except the highest point)
+ for (int j=0; j<n; ++j) {
+ p_o[j] = 0;
+ for (int i=0; i<n; ++i) {
+ p_o[i] += points[j*n+i];
+ }
+ }
+ for (int i=0; i<n; ++i) {
+ p_o[i] /= n;
+ }
+
+ bool converged = true;
+ for (int i=0; i<n; ++i) {
+ if (p_o[i] != points[nn+i]) {
+ converged = false;
+ }
+ }
+ if (converged) break;
+
+ // trying a reflection
+ for (int i=0; i<n; ++i) {
+ p_r[i] = p_o[i] + alpha*(p_o[i]-points[nn+i]);
+ }
+ float val_r = func(p_r);
+
+ if ((val_r>=vals[0])&&(val_r<vals[n])) {
+ // reflection between second highest and lowest
+ // add it to the simplex
+ Logger::info("Choosing reflection\n");
+ addValue(n, val_r,vals, p_r, points, n);
+ continue;
+ }
+
+ if (val_r<vals[0]) {
+ // value is smaller than smallest in simplex
+
+ // expand some more to see if it drops further
+ for (int i=0; i<n; ++i) {
+ p_e[i] = 2*p_r[i]-p_o[i];
+ }
+ float val_e = func(p_e);
+
+ if (val_e<val_r) {
+ Logger::info("Choosing reflection and expansion\n");
+ addValue(n, val_e,vals,p_e,points,n);
+ }
+ else {
+ Logger::info("Choosing reflection\n");
+ addValue(n, val_r,vals,p_r,points,n);
+ }
+ continue;
+ }
+ if (val_r>=vals[n]) {
+ for (int i=0; i<n; ++i) {
+ p_e[i] = (p_o[i]+points[nn+i])/2;
+ }
+ float val_e = func(p_e);
+
+ if (val_e<vals[n]) {
+ Logger::info("Choosing contraction\n");
+ addValue(n,val_e,vals,p_e,points,n);
+ continue;
+ }
+ }
+ {
+ Logger::info("Full contraction\n");
+ for (int j=1; j<=n; ++j) {
+ for (int i=0; i<n; ++i) {
+ points[j*n+i] = (points[j*n+i]+points[i])/2;
+ }
+ float val = func(points+j*n);
+ addValue(j,val,vals,points+j*n,points,n);
+ }
+ }
+ }
+
+ float bestVal = vals[0];
+
+ delete[] p_r;
+ delete[] p_o;
+ delete[] p_e;
+ if (ownVals) delete[] vals;
+
+ return bestVal;
+}
+
+}
+
+#endif //OPENCV_FLANN_SIMPLEX_DOWNHILL_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/timer.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/timer.h
new file mode 100644
index 00000000..107371ec
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/flann/timer.h
@@ -0,0 +1,93 @@
+/***********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
+ * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
+ *
+ * THE BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *************************************************************************/
+
+#ifndef OPENCV_FLANN_TIMER_H
+#define OPENCV_FLANN_TIMER_H
+
+#include <time.h>
+
+
+namespace cvflann
+{
+
+/**
+ * A start-stop timer class.
+ *
+ * Can be used to time portions of code.
+ */
+class StartStopTimer
+{
+ clock_t startTime;
+
+public:
+ /**
+ * Value of the timer.
+ */
+ double value;
+
+
+ /**
+ * Constructor.
+ */
+ StartStopTimer()
+ {
+ reset();
+ }
+
+ /**
+ * Starts the timer.
+ */
+ void start()
+ {
+ startTime = clock();
+ }
+
+ /**
+ * Stops the timer and updates timer value.
+ */
+ void stop()
+ {
+ clock_t stopTime = clock();
+ value += ( (double)stopTime - startTime) / CLOCKS_PER_SEC;
+ }
+
+ /**
+ * Resets the timer value to 0.
+ */
+ void reset()
+ {
+ value = 0;
+ }
+
+};
+
+}
+
+#endif // OPENCV_FLANN_TIMER_H
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp
new file mode 100644
index 00000000..6cc00aed
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/block.hpp
@@ -0,0 +1,203 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_DEVICE_BLOCK_HPP__
+#define __OPENCV_GPU_DEVICE_BLOCK_HPP__
+
+namespace cv { namespace gpu { namespace device
+{
+ struct Block
+ {
+ static __device__ __forceinline__ unsigned int id()
+ {
+ return blockIdx.x;
+ }
+
+ static __device__ __forceinline__ unsigned int stride()
+ {
+ return blockDim.x * blockDim.y * blockDim.z;
+ }
+
+ static __device__ __forceinline__ void sync()
+ {
+ __syncthreads();
+ }
+
+ static __device__ __forceinline__ int flattenedThreadId()
+ {
+ return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
+ }
+
+ template<typename It, typename T>
+ static __device__ __forceinline__ void fill(It beg, It end, const T& value)
+ {
+ int STRIDE = stride();
+ It t = beg + flattenedThreadId();
+
+ for(; t < end; t += STRIDE)
+ *t = value;
+ }
+
+ template<typename OutIt, typename T>
+ static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
+ {
+ int STRIDE = stride();
+ int tid = flattenedThreadId();
+ value += tid;
+
+ for(OutIt t = beg + tid; t < end; t += STRIDE, value += STRIDE)
+ *t = value;
+ }
+
+ template<typename InIt, typename OutIt>
+ static __device__ __forceinline__ void copy(InIt beg, InIt end, OutIt out)
+ {
+ int STRIDE = stride();
+ InIt t = beg + flattenedThreadId();
+ OutIt o = out + (t - beg);
+
+ for(; t < end; t += STRIDE, o += STRIDE)
+ *o = *t;
+ }
+
+ template<typename InIt, typename OutIt, class UnOp>
+ static __device__ __forceinline__ void transfrom(InIt beg, InIt end, OutIt out, UnOp op)
+ {
+ int STRIDE = stride();
+ InIt t = beg + flattenedThreadId();
+ OutIt o = out + (t - beg);
+
+ for(; t < end; t += STRIDE, o += STRIDE)
+ *o = op(*t);
+ }
+
+ template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
+ static __device__ __forceinline__ void transfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
+ {
+ int STRIDE = stride();
+ InIt1 t1 = beg1 + flattenedThreadId();
+ InIt2 t2 = beg2 + flattenedThreadId();
+ OutIt o = out + (t1 - beg1);
+
+ for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, o += STRIDE)
+ *o = op(*t1, *t2);
+ }
+
+ template<int CTA_SIZE, typename T, class BinOp>
+ static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op)
+ {
+ int tid = flattenedThreadId();
+ T val = buffer[tid];
+
+ if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
+ if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
+ if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
+ if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }
+
+ if (tid < 32)
+ {
+ if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
+ if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
+ if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
+ if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
+ if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
+ if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
+ }
+ }
+
+ template<int CTA_SIZE, typename T, class BinOp>
+ static __device__ __forceinline__ T reduce(volatile T* buffer, T init, BinOp op)
+ {
+ int tid = flattenedThreadId();
+ T val = buffer[tid] = init;
+ __syncthreads();
+
+ if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
+ if (CTA_SIZE >= 512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
+ if (CTA_SIZE >= 256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
+ if (CTA_SIZE >= 128) { if (tid < 64) buffer[tid] = val = op(val, buffer[tid + 64]); __syncthreads(); }
+
+ if (tid < 32)
+ {
+ if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
+ if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
+ if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid + 8]); }
+ if (CTA_SIZE >= 8) { buffer[tid] = val = op(val, buffer[tid + 4]); }
+ if (CTA_SIZE >= 4) { buffer[tid] = val = op(val, buffer[tid + 2]); }
+ if (CTA_SIZE >= 2) { buffer[tid] = val = op(val, buffer[tid + 1]); }
+ }
+ __syncthreads();
+ return buffer[0];
+ }
+
+ template <typename T, class BinOp>
+ static __device__ __forceinline__ void reduce_n(T* data, unsigned int n, BinOp op)
+ {
+ int ftid = flattenedThreadId();
+ int sft = stride();
+
+ if (sft < n)
+ {
+ for (unsigned int i = sft + ftid; i < n; i += sft)
+ data[ftid] = op(data[ftid], data[i]);
+
+ __syncthreads();
+
+ n = sft;
+ }
+
+ while (n > 1)
+ {
+ unsigned int half = n/2;
+
+ if (ftid < half)
+ data[ftid] = op(data[ftid], data[n - ftid - 1]);
+
+ __syncthreads();
+
+ n = n - half;
+ }
+ }
+ };
+}}}
+
+#endif /* __OPENCV_GPU_DEVICE_BLOCK_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp
new file mode 100644
index 00000000..693ba216
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/border_interpolate.hpp
@@ -0,0 +1,714 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
+#define __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
+
+#include "saturate_cast.hpp"
+#include "vec_traits.hpp"
+#include "vec_math.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ //////////////////////////////////////////////////////////////
+ // BrdConstant
+
+ template <typename D> struct BrdRowConstant
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdRowConstant(int width_, const D& val_ = VecTraits<D>::all(0)) : width(width_), val(val_) {}
+
+ template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
+ {
+ return x >= 0 ? saturate_cast<D>(data[x]) : val;
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
+ {
+ return x < width ? saturate_cast<D>(data[x]) : val;
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
+ {
+ return (x >= 0 && x < width) ? saturate_cast<D>(data[x]) : val;
+ }
+
+ const int width;
+ const D val;
+ };
+
+ template <typename D> struct BrdColConstant
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdColConstant(int height_, const D& val_ = VecTraits<D>::all(0)) : height(height_), val(val_) {}
+
+ template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
+ {
+ return y >= 0 ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
+ {
+ return y < height ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
+ {
+ return (y >= 0 && y < height) ? saturate_cast<D>(*(const T*)((const char*)data + y * step)) : val;
+ }
+
+ const int height;
+ const D val;
+ };
+
+ template <typename D> struct BrdConstant
+ {
+ typedef D result_type;
+
+ __host__ __device__ __forceinline__ BrdConstant(int height_, int width_, const D& val_ = VecTraits<D>::all(0)) : height(height_), width(width_), val(val_)
+ {
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
+ {
+ return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(((const T*)((const uchar*)data + y * step))[x]) : val;
+ }
+
+ template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
+ {
+ return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
+ }
+
+ const int height;
+ const int width;
+ const D val;
+ };
+
+ //////////////////////////////////////////////////////////////
+ // BrdReplicate
+
+ template <typename D> struct BrdRowReplicate
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdRowReplicate(int width) : last_col(width - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdRowReplicate(int width, U) : last_col(width - 1) {}
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return ::max(x, 0);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return ::min(x, last_col);
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_low(idx_col_high(x));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_low(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_high(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col(x)]);
+ }
+
+ const int last_col;
+ };
+
+ template <typename D> struct BrdColReplicate
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdColReplicate(int height) : last_row(height - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdColReplicate(int height, U) : last_row(height - 1) {}
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return ::max(y, 0);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return ::min(y, last_row);
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_low(idx_row_high(y));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const T*)((const char*)data + idx_row_low(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const T*)((const char*)data + idx_row_high(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const T*)((const char*)data + idx_row(y) * step));
+ }
+
+ const int last_row;
+ };
+
+ template <typename D> struct BrdReplicate
+ {
+ typedef D result_type;
+
+ __host__ __device__ __forceinline__ BrdReplicate(int height, int width) : last_row(height - 1), last_col(width - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdReplicate(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return ::max(y, 0);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return ::min(y, last_row);
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_low(idx_row_high(y));
+ }
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return ::max(x, 0);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return ::min(x, last_col);
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_low(idx_col_high(x));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
+ }
+
+ template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
+ {
+ return saturate_cast<D>(src(idx_row(y), idx_col(x)));
+ }
+
+ const int last_row;
+ const int last_col;
+ };
+
+ //////////////////////////////////////////////////////////////
+ // BrdReflect101
+
+ template <typename D> struct BrdRowReflect101
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdRowReflect101(int width) : last_col(width - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdRowReflect101(int width, U) : last_col(width - 1) {}
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return ::abs(x) % (last_col + 1);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_low(idx_col_high(x));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_low(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_high(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col(x)]);
+ }
+
+ const int last_col;
+ };
+
+ template <typename D> struct BrdColReflect101
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdColReflect101(int height) : last_row(height - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdColReflect101(int height, U) : last_row(height - 1) {}
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return ::abs(y) % (last_row + 1);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_low(idx_row_high(y));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
+ }
+
+ const int last_row;
+ };
+
+ template <typename D> struct BrdReflect101
+ {
+ typedef D result_type;
+
+ __host__ __device__ __forceinline__ BrdReflect101(int height, int width) : last_row(height - 1), last_col(width - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdReflect101(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return ::abs(y) % (last_row + 1);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return ::abs(last_row - ::abs(last_row - y)) % (last_row + 1);
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_low(idx_row_high(y));
+ }
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return ::abs(x) % (last_col + 1);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return ::abs(last_col - ::abs(last_col - x)) % (last_col + 1);
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_low(idx_col_high(x));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
+ }
+
+ template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
+ {
+ return saturate_cast<D>(src(idx_row(y), idx_col(x)));
+ }
+
+ const int last_row;
+ const int last_col;
+ };
+
+ //////////////////////////////////////////////////////////////
+ // BrdReflect
+
+ template <typename D> struct BrdRowReflect
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdRowReflect(int width) : last_col(width - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdRowReflect(int width, U) : last_col(width - 1) {}
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return (::abs(x) - (x < 0)) % (last_col + 1);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return ::abs(last_col - ::abs(last_col - x) + (x > last_col)) % (last_col + 1);
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_high(::abs(x) - (x < 0));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_low(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_high(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col(x)]);
+ }
+
+ const int last_col;
+ };
+
+ template <typename D> struct BrdColReflect
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdColReflect(int height) : last_row(height - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdColReflect(int height, U) : last_row(height - 1) {}
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return (::abs(y) - (y < 0)) % (last_row + 1);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return ::abs(last_row - ::abs(last_row - y) + (y > last_row)) % (last_row + 1);
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_high(::abs(y) - (y < 0));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
+ }
+
+ const int last_row;
+ };
+
+ template <typename D> struct BrdReflect
+ {
+ typedef D result_type;
+
+ __host__ __device__ __forceinline__ BrdReflect(int height, int width) : last_row(height - 1), last_col(width - 1) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdReflect(int height, int width, U) : last_row(height - 1), last_col(width - 1) {}
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return (::abs(y) - (y < 0)) % (last_row + 1);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return /*::abs*/(last_row - ::abs(last_row - y) + (y > last_row)) /*% (last_row + 1)*/;
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_low(idx_row_high(y));
+ }
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return (::abs(x) - (x < 0)) % (last_col + 1);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return (last_col - ::abs(last_col - x) + (x > last_col));
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_low(idx_col_high(x));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
+ }
+
+ template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
+ {
+ return saturate_cast<D>(src(idx_row(y), idx_col(x)));
+ }
+
+ const int last_row;
+ const int last_col;
+ };
+
+ //////////////////////////////////////////////////////////////
+ // BrdWrap
+
+ template <typename D> struct BrdRowWrap
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdRowWrap(int width_) : width(width_) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdRowWrap(int width_, U) : width(width_) {}
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return (x >= 0) * x + (x < 0) * (x - ((x - width + 1) / width) * width);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return (x < width) * x + (x >= width) * (x % width);
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_high(idx_col_low(x));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_low(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col_high(x)]);
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int x, const T* data) const
+ {
+ return saturate_cast<D>(data[idx_col(x)]);
+ }
+
+ const int width;
+ };
+
+ template <typename D> struct BrdColWrap
+ {
+ typedef D result_type;
+
+ explicit __host__ __device__ __forceinline__ BrdColWrap(int height_) : height(height_) {}
+ template <typename U> __host__ __device__ __forceinline__ BrdColWrap(int height_, U) : height(height_) {}
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return (y >= 0) * y + (y < 0) * (y - ((y - height + 1) / height) * height);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return (y < height) * y + (y >= height) * (y % height);
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_high(idx_row_low(y));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_low(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row_low(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at_high(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row_high(y) * step));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(*(const D*)((const char*)data + idx_row(y) * step));
+ }
+
+ const int height;
+ };
+
+ template <typename D> struct BrdWrap
+ {
+ typedef D result_type;
+
+ __host__ __device__ __forceinline__ BrdWrap(int height_, int width_) :
+ height(height_), width(width_)
+ {
+ }
+ template <typename U>
+ __host__ __device__ __forceinline__ BrdWrap(int height_, int width_, U) :
+ height(height_), width(width_)
+ {
+ }
+
+ __device__ __forceinline__ int idx_row_low(int y) const
+ {
+ return (y >= 0) ? y : (y - ((y - height + 1) / height) * height);
+ }
+
+ __device__ __forceinline__ int idx_row_high(int y) const
+ {
+ return (y < height) ? y : (y % height);
+ }
+
+ __device__ __forceinline__ int idx_row(int y) const
+ {
+ return idx_row_high(idx_row_low(y));
+ }
+
+ __device__ __forceinline__ int idx_col_low(int x) const
+ {
+ return (x >= 0) ? x : (x - ((x - width + 1) / width) * width);
+ }
+
+ __device__ __forceinline__ int idx_col_high(int x) const
+ {
+ return (x < width) ? x : (x % width);
+ }
+
+ __device__ __forceinline__ int idx_col(int x) const
+ {
+ return idx_col_high(idx_col_low(x));
+ }
+
+ template <typename T> __device__ __forceinline__ D at(int y, int x, const T* data, size_t step) const
+ {
+ return saturate_cast<D>(((const T*)((const char*)data + idx_row(y) * step))[idx_col(x)]);
+ }
+
+ template <typename Ptr2D> __device__ __forceinline__ D at(typename Ptr2D::index_type y, typename Ptr2D::index_type x, const Ptr2D& src) const
+ {
+ return saturate_cast<D>(src(idx_row(y), idx_col(x)));
+ }
+
+ const int height;
+ const int width;
+ };
+
+ //////////////////////////////////////////////////////////////
+ // BorderReader
+
+ template <typename Ptr2D, typename B> struct BorderReader
+ {
+ typedef typename B::result_type elem_type;
+ typedef typename Ptr2D::index_type index_type;
+
+ __host__ __device__ __forceinline__ BorderReader(const Ptr2D& ptr_, const B& b_) : ptr(ptr_), b(b_) {}
+
+ __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const
+ {
+ return b.at(y, x, ptr);
+ }
+
+ const Ptr2D ptr;
+ const B b;
+ };
+
+ // under win32 there is some bug with templated types that passed as kernel parameters
+ // with this specialization all works fine
+ template <typename Ptr2D, typename D> struct BorderReader< Ptr2D, BrdConstant<D> >
+ {
+ typedef typename BrdConstant<D>::result_type elem_type;
+ typedef typename Ptr2D::index_type index_type;
+
+ __host__ __device__ __forceinline__ BorderReader(const Ptr2D& src_, const BrdConstant<D>& b) :
+ src(src_), height(b.height), width(b.width), val(b.val)
+ {
+ }
+
+ __device__ __forceinline__ D operator ()(index_type y, index_type x) const
+ {
+ return (x >= 0 && x < width && y >= 0 && y < height) ? saturate_cast<D>(src(y, x)) : val;
+ }
+
+ const Ptr2D src;
+ const int height;
+ const int width;
+ const D val;
+ };
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp
new file mode 100644
index 00000000..5af64bf6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/color.hpp
@@ -0,0 +1,301 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_COLOR_HPP__
+#define __OPENCV_GPU_COLOR_HPP__
+
+#include "detail/color_detail.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ // All OPENCV_GPU_IMPLEMENT_*_TRAITS(ColorSpace1_to_ColorSpace2, ...) macros implements
+ // template <typename T> class ColorSpace1_to_ColorSpace2_traits
+ // {
+ // typedef ... functor_type;
+ // static __host__ __device__ functor_type create_functor();
+ // };
+
+ OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgb, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_bgra, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgr_to_rgba, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_bgr, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgb, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(bgra_to_rgba, 4, 4, 2)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr555, 3, 0, 5)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgr_to_bgr565, 3, 0, 6)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr555, 3, 2, 5)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgb_to_bgr565, 3, 2, 6)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr555, 4, 0, 5)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(bgra_to_bgr565, 4, 0, 6)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr555, 4, 2, 5)
+ OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(rgba_to_bgr565, 4, 2, 6)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgb, 3, 2, 5)
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgb, 3, 2, 6)
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgr, 3, 0, 5)
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgr, 3, 0, 6)
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_rgba, 4, 2, 5)
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_rgba, 4, 2, 6)
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr555_to_bgra, 4, 0, 5)
+ OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(bgr565_to_bgra, 4, 0, 6)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgr, 3)
+ OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(gray_to_bgra, 4)
+
+ #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr555, 5)
+ OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(gray_to_bgr565, 6)
+
+ #undef OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr555_to_gray, 5)
+ OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(bgr565_to_gray, 6)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgb_to_gray, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgr_to_gray, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(rgba_to_gray, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(bgra_to_gray, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgb_to_yuv4, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(rgba_to_yuv4, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgr_to_yuv4, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(bgra_to_yuv4, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgb, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_rgba, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgb, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_rgba, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgr, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv_to_bgra, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgr, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(yuv4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgb_to_YCrCb4, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(rgba_to_YCrCb4, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgr_to_YCrCb4, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(bgra_to_YCrCb4, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgb, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_rgba, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgb, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_rgba, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgr, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb_to_bgra, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgr, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(YCrCb4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgb_to_xyz4, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(rgba_to_xyz4, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgr_to_xyz4, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(bgra_to_xyz4, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgb, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgb, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_rgba, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_rgba, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgr, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgr, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz_to_bgra, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(xyz4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgb_to_hsv4, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(rgba_to_hsv4, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgr_to_hsv4, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(bgra_to_hsv4, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgb, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_rgba, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgb, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_rgba, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgr, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv_to_bgra, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgr, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(hsv4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgb_to_hls4, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(rgba_to_hls4, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgr_to_hls4, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(bgra_to_hls4, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgb, 3, 3, 2)
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_rgba, 3, 4, 2)
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgb, 4, 3, 2)
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_rgba, 4, 4, 2)
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgr, 3, 3, 0)
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls_to_bgra, 3, 4, 0)
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgr, 4, 3, 0)
+ OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(hls4_to_bgra, 4, 4, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab, 3, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab, 4, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgb_to_lab4, 3, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(rgba_to_lab4, 4, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab, 3, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab, 4, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgr_to_lab4, 3, 4, true, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(bgra_to_lab4, 4, 4, true, 0)
+
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab, 3, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab, 4, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgb_to_lab4, 3, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lrgba_to_lab4, 4, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab, 3, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab, 4, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgr_to_lab4, 3, 4, false, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(lbgra_to_lab4, 4, 4, false, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgb, 3, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgb, 4, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_rgba, 3, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_rgba, 4, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgr, 3, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgr, 4, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_bgra, 3, 4, true, 0)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_bgra, 4, 4, true, 0)
+
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgb, 3, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgb, 4, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lrgba, 3, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lrgba, 4, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgr, 3, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgr, 4, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab_to_lbgra, 3, 4, false, 0)
+ OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(lab4_to_lbgra, 4, 4, false, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv, 3, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv, 4, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgb_to_luv4, 3, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(rgba_to_luv4, 4, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv, 3, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv, 4, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgr_to_luv4, 3, 4, true, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(bgra_to_luv4, 4, 4, true, 0)
+
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv, 3, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv, 4, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgb_to_luv4, 3, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lrgba_to_luv4, 4, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv, 3, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv, 4, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgr_to_luv4, 3, 4, false, 0)
+ OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(lbgra_to_luv4, 4, 4, false, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS
+
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgb, 3, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgb, 4, 3, true, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_rgba, 3, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_rgba, 4, 4, true, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgr, 3, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgr, 4, 3, true, 0)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_bgra, 3, 4, true, 0)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_bgra, 4, 4, true, 0)
+
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgb, 3, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgb, 4, 3, false, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lrgba, 3, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lrgba, 4, 4, false, 2)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgr, 3, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgr, 4, 3, false, 0)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv_to_lbgra, 3, 4, false, 0)
+ OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(luv4_to_lbgra, 4, 4, false, 0)
+
+ #undef OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_COLOR_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp
new file mode 100644
index 00000000..26a349ff
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/common.hpp
@@ -0,0 +1,118 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_COMMON_HPP__
+#define __OPENCV_GPU_COMMON_HPP__
+
+#include <cuda_runtime.h>
+#include "opencv2/core/cuda_devptrs.hpp"
+
+#ifndef CV_PI
+ #define CV_PI 3.1415926535897932384626433832795
+#endif
+
+#ifndef CV_PI_F
+ #ifndef CV_PI
+ #define CV_PI_F 3.14159265f
+ #else
+ #define CV_PI_F ((float)CV_PI)
+ #endif
+#endif
+
+#if defined(__GNUC__)
+ #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__, __func__)
+#else /* defined(__CUDACC__) || defined(__MSVC__) */
+ #define cudaSafeCall(expr) ___cudaSafeCall(expr, __FILE__, __LINE__)
+#endif
+
+namespace cv { namespace gpu
+{
+ void error(const char *error_string, const char *file, const int line, const char *func);
+
+ template <typename T> static inline bool isAligned(const T* ptr, size_t size)
+ {
+ return reinterpret_cast<size_t>(ptr) % size == 0;
+ }
+
+ static inline bool isAligned(size_t step, size_t size)
+ {
+ return step % size == 0;
+ }
+}}
+
+static inline void ___cudaSafeCall(cudaError_t err, const char *file, const int line, const char *func = "")
+{
+ if (cudaSuccess != err)
+ cv::gpu::error(cudaGetErrorString(err), file, line, func);
+}
+
+namespace cv { namespace gpu
+{
+ __host__ __device__ __forceinline__ int divUp(int total, int grain)
+ {
+ return (total + grain - 1) / grain;
+ }
+
+ namespace device
+ {
+ using cv::gpu::divUp;
+
+#ifdef __CUDACC__
+ typedef unsigned char uchar;
+ typedef unsigned short ushort;
+ typedef signed char schar;
+ #if defined (_WIN32) || defined (__APPLE__) || defined (__QNX__)
+ typedef unsigned int uint;
+ #endif
+
+ template<class T> inline void bindTexture(const textureReference* tex, const PtrStepSz<T>& img)
+ {
+ cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();
+ cudaSafeCall( cudaBindTexture2D(0, tex, img.ptr(), &desc, img.cols, img.rows, img.step) );
+ }
+#endif // __CUDACC__
+ }
+}}
+
+
+
+#endif // __OPENCV_GPU_COMMON_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp
new file mode 100644
index 00000000..a3f62fba
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/datamov_utils.hpp
@@ -0,0 +1,105 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_DATAMOV_UTILS_HPP__
+#define __OPENCV_GPU_DATAMOV_UTILS_HPP__
+
+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
+
+ // for Fermi memory space is detected automatically
+ template <typename T> struct ForceGlob
+ {
+ __device__ __forceinline__ static void Load(const T* ptr, int offset, T& val) { val = ptr[offset]; }
+ };
+
+ #else // __CUDA_ARCH__ >= 200
+
+ #if defined(_WIN64) || defined(__LP64__)
+ // 64-bit register modifier for inlined asm
+ #define OPENCV_GPU_ASM_PTR "l"
+ #else
+ // 32-bit register modifier for inlined asm
+ #define OPENCV_GPU_ASM_PTR "r"
+ #endif
+
+ template<class T> struct ForceGlob;
+
+ #define OPENCV_GPU_DEFINE_FORCE_GLOB(base_type, ptx_type, reg_mod) \
+ template <> struct ForceGlob<base_type> \
+ { \
+ __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
+ { \
+ asm("ld.global."#ptx_type" %0, [%1];" : "="#reg_mod(val) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
+ } \
+ };
+
+ #define OPENCV_GPU_DEFINE_FORCE_GLOB_B(base_type, ptx_type) \
+ template <> struct ForceGlob<base_type> \
+ { \
+ __device__ __forceinline__ static void Load(const base_type* ptr, int offset, base_type& val) \
+ { \
+ asm("ld.global."#ptx_type" %0, [%1];" : "=r"(*reinterpret_cast<uint*>(&val)) : OPENCV_GPU_ASM_PTR(ptr + offset)); \
+ } \
+ };
+
+ OPENCV_GPU_DEFINE_FORCE_GLOB_B(uchar, u8)
+ OPENCV_GPU_DEFINE_FORCE_GLOB_B(schar, s8)
+ OPENCV_GPU_DEFINE_FORCE_GLOB_B(char, b8)
+ OPENCV_GPU_DEFINE_FORCE_GLOB (ushort, u16, h)
+ OPENCV_GPU_DEFINE_FORCE_GLOB (short, s16, h)
+ OPENCV_GPU_DEFINE_FORCE_GLOB (uint, u32, r)
+ OPENCV_GPU_DEFINE_FORCE_GLOB (int, s32, r)
+ OPENCV_GPU_DEFINE_FORCE_GLOB (float, f32, f)
+ OPENCV_GPU_DEFINE_FORCE_GLOB (double, f64, d)
+
+ #undef OPENCV_GPU_DEFINE_FORCE_GLOB
+ #undef OPENCV_GPU_DEFINE_FORCE_GLOB_B
+ #undef OPENCV_GPU_ASM_PTR
+
+ #endif // __CUDA_ARCH__ >= 200
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_DATAMOV_UTILS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp
new file mode 100644
index 00000000..c4ec64b5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/color_detail.hpp
@@ -0,0 +1,2219 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_COLOR_DETAIL_HPP__
+#define __OPENCV_GPU_COLOR_DETAIL_HPP__
+
+#include "../common.hpp"
+#include "../vec_traits.hpp"
+#include "../saturate_cast.hpp"
+#include "../limits.hpp"
+#include "../functional.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ #ifndef CV_DESCALE
+ #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))
+ #endif
+
+ namespace color_detail
+ {
+ template<typename T> struct ColorChannel
+ {
+ typedef float worktype_f;
+ static __device__ __forceinline__ T max() { return numeric_limits<T>::max(); }
+ static __device__ __forceinline__ T half() { return (T)(max()/2 + 1); }
+ };
+
+ template<> struct ColorChannel<float>
+ {
+ typedef float worktype_f;
+ static __device__ __forceinline__ float max() { return 1.f; }
+ static __device__ __forceinline__ float half() { return 0.5f; }
+ };
+
+ template <typename T> static __device__ __forceinline__ void setAlpha(typename TypeVec<T, 3>::vec_type& vec, T val)
+ {
+ }
+
+ template <typename T> static __device__ __forceinline__ void setAlpha(typename TypeVec<T, 4>::vec_type& vec, T val)
+ {
+ vec.w = val;
+ }
+
+ template <typename T> static __device__ __forceinline__ T getAlpha(const typename TypeVec<T, 3>::vec_type& vec)
+ {
+ return ColorChannel<T>::max();
+ }
+
+ template <typename T> static __device__ __forceinline__ T getAlpha(const typename TypeVec<T, 4>::vec_type& vec)
+ {
+ return vec.w;
+ }
+
+ enum
+ {
+ yuv_shift = 14,
+ xyz_shift = 12,
+ R2Y = 4899,
+ G2Y = 9617,
+ B2Y = 1868,
+ BLOCK_SIZE = 256
+ };
+ }
+
+////////////////// Various 3/4-channel to 3/4-channel RGB transformations /////////////////
+
+ namespace color_detail
+ {
+ template <typename T, int scn, int dcn, int bidx> struct RGB2RGB
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ dst.x = bidx == 0 ? src.x : src.z;
+ dst.y = src.y;
+ dst.z = bidx == 0 ? src.z : src.x;
+ setAlpha(dst, getAlpha<T>(src));
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2RGB() {}
+ __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {}
+ };
+
+ template <> struct RGB2RGB<uchar, 4, 4, 2> : unary_function<uint, uint>
+ {
+ __device__ uint operator()(uint src) const
+ {
+ uint dst = 0;
+
+ dst |= (0xffu & (src >> 16));
+ dst |= (0xffu & (src >> 8)) << 8;
+ dst |= (0xffu & (src)) << 16;
+ dst |= (0xffu & (src >> 24)) << 24;
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2RGB() {}
+ __host__ __device__ __forceinline__ RGB2RGB(const RGB2RGB&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_RGB2RGB_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2RGB<T, scn, dcn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+/////////// Transforming 16-bit (565 or 555) RGB to/from 24/32-bit (888[8]) RGB //////////
+
+ namespace color_detail
+ {
+ template <int green_bits, int bidx> struct RGB2RGB5x5Converter;
+
+ template<int bidx> struct RGB2RGB5x5Converter<6, bidx>
+ {
+ template <typename T>
+ static __device__ __forceinline__ ushort cvt(const T& src)
+ {
+ uint b = bidx == 0 ? src.x : src.z;
+ uint g = src.y;
+ uint r = bidx == 0 ? src.z : src.x;
+ return (ushort)((b >> 3) | ((g & ~3) << 3) | ((r & ~7) << 8));
+ }
+ };
+
+ template<int bidx> struct RGB2RGB5x5Converter<5, bidx>
+ {
+ static __device__ __forceinline__ ushort cvt(const uchar3& src)
+ {
+ uint b = bidx == 0 ? src.x : src.z;
+ uint g = src.y;
+ uint r = bidx == 0 ? src.z : src.x;
+ return (ushort)((b >> 3) | ((g & ~7) << 2) | ((r & ~7) << 7));
+ }
+
+ static __device__ __forceinline__ ushort cvt(const uchar4& src)
+ {
+ uint b = bidx == 0 ? src.x : src.z;
+ uint g = src.y;
+ uint r = bidx == 0 ? src.z : src.x;
+ uint a = src.w;
+ return (ushort)((b >> 3) | ((g & ~7) << 2) | ((r & ~7) << 7) | (a * 0x8000));
+ }
+ };
+
+ template<int scn, int bidx, int green_bits> struct RGB2RGB5x5:
+ unary_function<typename TypeVec<uchar, scn>::vec_type, ushort>
+ {
+ __device__ __forceinline__ ushort operator()(const typename TypeVec<uchar, scn>::vec_type& src) const
+ {
+ return RGB2RGB5x5Converter<green_bits, bidx>::cvt(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2RGB5x5() {}
+ __host__ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_RGB2RGB5x5_TRAITS(name, scn, bidx, green_bits) \
+ struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2RGB5x5<scn, bidx, green_bits> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ namespace color_detail
+ {
+ template <int green_bits, int bidx> struct RGB5x52RGBConverter;
+
+ template <int bidx> struct RGB5x52RGBConverter<5, bidx>
+ {
+ static __device__ __forceinline__ void cvt(uint src, uchar3& dst)
+ {
+ (bidx == 0 ? dst.x : dst.z) = src << 3;
+ dst.y = (src >> 2) & ~7;
+ (bidx == 0 ? dst.z : dst.x) = (src >> 7) & ~7;
+ }
+
+ static __device__ __forceinline__ void cvt(uint src, uint& dst)
+ {
+ dst = 0;
+
+ dst |= (0xffu & (src << 3)) << (bidx * 8);
+ dst |= (0xffu & ((src >> 2) & ~7)) << 8;
+ dst |= (0xffu & ((src >> 7) & ~7)) << ((bidx ^ 2) * 8);
+ dst |= ((src & 0x8000) * 0xffu) << 24;
+ }
+ };
+
+ template <int bidx> struct RGB5x52RGBConverter<6, bidx>
+ {
+ static __device__ __forceinline__ void cvt(uint src, uchar3& dst)
+ {
+ (bidx == 0 ? dst.x : dst.z) = src << 3;
+ dst.y = (src >> 3) & ~3;
+ (bidx == 0 ? dst.z : dst.x) = (src >> 8) & ~7;
+ }
+
+ static __device__ __forceinline__ void cvt(uint src, uint& dst)
+ {
+ dst = 0xffu << 24;
+
+ dst |= (0xffu & (src << 3)) << (bidx * 8);
+ dst |= (0xffu &((src >> 3) & ~3)) << 8;
+ dst |= (0xffu & ((src >> 8) & ~7)) << ((bidx ^ 2) * 8);
+ }
+ };
+
+ template <int dcn, int bidx, int green_bits> struct RGB5x52RGB;
+
+ template <int bidx, int green_bits> struct RGB5x52RGB<3, bidx, green_bits> : unary_function<ushort, uchar3>
+ {
+ __device__ __forceinline__ uchar3 operator()(ushort src) const
+ {
+ uchar3 dst;
+ RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB5x52RGB() {}
+ __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {}
+ };
+
+ template <int bidx, int green_bits> struct RGB5x52RGB<4, bidx, green_bits> : unary_function<ushort, uint>
+ {
+ __device__ __forceinline__ uint operator()(ushort src) const
+ {
+ uint dst;
+ RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB5x52RGB() {}
+ __host__ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_RGB5x52RGB_TRAITS(name, dcn, bidx, green_bits) \
+ struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB5x52RGB<dcn, bidx, green_bits> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+///////////////////////////////// Grayscale to Color ////////////////////////////////
+
+ namespace color_detail
+ {
+ template <typename T, int dcn> struct Gray2RGB : unary_function<T, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(T src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ dst.z = dst.y = dst.x = src;
+ setAlpha(dst, ColorChannel<T>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ Gray2RGB() {}
+ __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {}
+ };
+
+ template <> struct Gray2RGB<uchar, 4> : unary_function<uchar, uint>
+ {
+ __device__ __forceinline__ uint operator()(uint src) const
+ {
+ uint dst = 0xffu << 24;
+
+ dst |= src;
+ dst |= src << 8;
+ dst |= src << 16;
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ Gray2RGB() {}
+ __host__ __device__ __forceinline__ Gray2RGB(const Gray2RGB&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_GRAY2RGB_TRAITS(name, dcn) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::Gray2RGB<T, dcn> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ namespace color_detail
+ {
+ template <int green_bits> struct Gray2RGB5x5Converter;
+
+ template<> struct Gray2RGB5x5Converter<6>
+ {
+ static __device__ __forceinline__ ushort cvt(uint t)
+ {
+ return (ushort)((t >> 3) | ((t & ~3) << 3) | ((t & ~7) << 8));
+ }
+ };
+
+ template<> struct Gray2RGB5x5Converter<5>
+ {
+ static __device__ __forceinline__ ushort cvt(uint t)
+ {
+ t >>= 3;
+ return (ushort)(t | (t << 5) | (t << 10));
+ }
+ };
+
+ template<int green_bits> struct Gray2RGB5x5 : unary_function<uchar, ushort>
+ {
+ __device__ __forceinline__ ushort operator()(uint src) const
+ {
+ return Gray2RGB5x5Converter<green_bits>::cvt(src);
+ }
+
+ __host__ __device__ __forceinline__ Gray2RGB5x5() {}
+ __host__ __device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_GRAY2RGB5x5_TRAITS(name, green_bits) \
+ struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::Gray2RGB5x5<green_bits> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+///////////////////////////////// Color to Grayscale ////////////////////////////////
+
+ namespace color_detail
+ {
+ template <int green_bits> struct RGB5x52GrayConverter;
+
+ template <> struct RGB5x52GrayConverter<6>
+ {
+ static __device__ __forceinline__ uchar cvt(uint t)
+ {
+ return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 3) & 0xfc) * G2Y + ((t >> 8) & 0xf8) * R2Y, yuv_shift);
+ }
+ };
+
+ template <> struct RGB5x52GrayConverter<5>
+ {
+ static __device__ __forceinline__ uchar cvt(uint t)
+ {
+ return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 2) & 0xf8) * G2Y + ((t >> 7) & 0xf8) * R2Y, yuv_shift);
+ }
+ };
+
+ template<int green_bits> struct RGB5x52Gray : unary_function<ushort, uchar>
+ {
+ __device__ __forceinline__ uchar operator()(uint src) const
+ {
+ return RGB5x52GrayConverter<green_bits>::cvt(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB5x52Gray() {}
+ __host__ __device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_RGB5x52GRAY_TRAITS(name, green_bits) \
+ struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB5x52Gray<green_bits> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ namespace color_detail
+ {
+ template <int bidx, typename T> static __device__ __forceinline__ uint RGB2GrayConvert_8U(const T& src)
+ {
+ uint b = bidx == 0 ? src.x : src.z;
+ uint g = src.y;
+ uint r = bidx == 0 ? src.z : src.x;
+ return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift);
+ }
+
+ template <int bidx> static __device__ __forceinline__ uchar RGB2GrayConvert_8UC4(uint src)
+ {
+ uint b = 0xffu & (src >> (bidx * 8));
+ uint g = 0xffu & (src >> 8);
+ uint r = 0xffu & (src >> ((bidx ^ 2) * 8));
+ return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift);
+ }
+
+ template <int bidx, typename T> static __device__ __forceinline__ float RGB2GrayConvert_32F(const T& src)
+ {
+ float b = bidx == 0 ? src.x : src.z;
+ float g = src.y;
+ float r = bidx == 0 ? src.z : src.x;
+ return b * 0.114f + g * 0.587f + r * 0.299f;
+ }
+
+ template <typename T, int scn, int bidx> struct RGB2Gray : unary_function<typename TypeVec<T, scn>::vec_type, T>
+ {
+ __device__ __forceinline__ T operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ return RGB2GrayConvert_8U<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2Gray() {}
+ __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {}
+ };
+
+ template <int scn, int bidx> struct RGB2Gray<float, scn, bidx> :
+ unary_function<typename TypeVec<float, scn>::vec_type, float>
+ {
+ __device__ __forceinline__ float operator()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ return RGB2GrayConvert_32F<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2Gray() {}
+ __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {}
+ };
+
+ template <int bidx> struct RGB2Gray<uchar, 4, bidx> : unary_function<uint, uchar>
+ {
+ __device__ __forceinline__ uchar operator()(uint src) const
+ {
+ return RGB2GrayConvert_8UC4<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2Gray() {}
+ __host__ __device__ __forceinline__ RGB2Gray(const RGB2Gray&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_RGB2GRAY_TRAITS(name, scn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2Gray<T, scn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+///////////////////////////////////// RGB <-> YUV //////////////////////////////////////
+
+ namespace color_detail
+ {
+ __constant__ float c_RGB2YUVCoeffs_f[5] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f };
+ __constant__ int c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, 8061, 14369 };
+
+ template <int bidx, typename T, typename D> static __device__ void RGB2YUVConvert(const T& src, D& dst)
+ {
+ const int delta = ColorChannel<typename VecTraits<T>::elem_type>::half() * (1 << yuv_shift);
+
+ const int b = bidx == 0 ? src.x : src.z;
+ const int g = src.y;
+ const int r = bidx == 0 ? src.z : src.x;
+
+ const int Y = CV_DESCALE(b * c_RGB2YUVCoeffs_i[2] + g * c_RGB2YUVCoeffs_i[1] + r * c_RGB2YUVCoeffs_i[0], yuv_shift);
+ const int Cr = CV_DESCALE((r - Y) * c_RGB2YUVCoeffs_i[3] + delta, yuv_shift);
+ const int Cb = CV_DESCALE((b - Y) * c_RGB2YUVCoeffs_i[4] + delta, yuv_shift);
+
+ dst.x = saturate_cast<typename VecTraits<T>::elem_type>(Y);
+ dst.y = saturate_cast<typename VecTraits<T>::elem_type>(Cr);
+ dst.z = saturate_cast<typename VecTraits<T>::elem_type>(Cb);
+ }
+
+ template <int bidx, typename T, typename D> static __device__ __forceinline__ void RGB2YUVConvert_32F(const T& src, D& dst)
+ {
+ const float b = bidx == 0 ? src.x : src.z;
+ const float g = src.y;
+ const float r = bidx == 0 ? src.z : src.x;
+
+ dst.x = b * c_RGB2YUVCoeffs_f[2] + g * c_RGB2YUVCoeffs_f[1] + r * c_RGB2YUVCoeffs_f[0];
+ dst.y = (r - dst.x) * c_RGB2YUVCoeffs_f[3] + ColorChannel<float>::half();
+ dst.z = (b - dst.x) * c_RGB2YUVCoeffs_f[4] + ColorChannel<float>::half();
+ }
+
+ template <typename T, int scn, int dcn, int bidx> struct RGB2YUV
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+ RGB2YUVConvert<bidx>(src, dst);
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2YUV() {}
+ __host__ __device__ __forceinline__ RGB2YUV(const RGB2YUV&) {}
+ };
+
+ template <int scn, int dcn, int bidx> struct RGB2YUV<float, scn, dcn, bidx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+ RGB2YUVConvert_32F<bidx>(src, dst);
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2YUV() {}
+ __host__ __device__ __forceinline__ RGB2YUV(const RGB2YUV&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_RGB2YUV_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2YUV<T, scn, dcn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ namespace color_detail
+ {
+ __constant__ float c_YUV2RGBCoeffs_f[5] = { 2.032f, -0.395f, -0.581f, 1.140f };
+ __constant__ int c_YUV2RGBCoeffs_i[5] = { 33292, -6472, -9519, 18678 };
+
+ template <int bidx, typename T, typename D> static __device__ void YUV2RGBConvert(const T& src, D& dst)
+ {
+ const int b = src.x + CV_DESCALE((src.z - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift);
+
+ const int g = src.x + CV_DESCALE((src.z - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YUV2RGBCoeffs_i[2]
+ + (src.y - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);
+
+ const int r = src.x + CV_DESCALE((src.y - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift);
+
+ (bidx == 0 ? dst.x : dst.z) = saturate_cast<typename VecTraits<D>::elem_type>(b);
+ dst.y = saturate_cast<typename VecTraits<D>::elem_type>(g);
+ (bidx == 0 ? dst.z : dst.x) = saturate_cast<typename VecTraits<D>::elem_type>(r);
+ }
+
+ template <int bidx> static __device__ uint YUV2RGBConvert_8UC4(uint src)
+ {
+ const int x = 0xff & (src);
+ const int y = 0xff & (src >> 8);
+ const int z = 0xff & (src >> 16);
+
+ const int b = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift);
+
+ const int g = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[2]
+ + (y - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);
+
+ const int r = x + CV_DESCALE((y - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift);
+
+ uint dst = 0xffu << 24;
+
+ dst |= saturate_cast<uchar>(b) << (bidx * 8);
+ dst |= saturate_cast<uchar>(g) << 8;
+ dst |= saturate_cast<uchar>(r) << ((bidx ^ 2) * 8);
+
+ return dst;
+ }
+
+ template <int bidx, typename T, typename D> static __device__ __forceinline__ void YUV2RGBConvert_32F(const T& src, D& dst)
+ {
+ (bidx == 0 ? dst.x : dst.z) = src.x + (src.z - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[3];
+
+ dst.y = src.x + (src.z - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[2]
+ + (src.y - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[1];
+
+ (bidx == 0 ? dst.z : dst.x) = src.x + (src.y - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[0];
+ }
+
+ template <typename T, int scn, int dcn, int bidx> struct YUV2RGB
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ YUV2RGBConvert<bidx>(src, dst);
+ setAlpha(dst, ColorChannel<T>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ YUV2RGB() {}
+ __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {}
+ };
+
+ template <int scn, int dcn, int bidx> struct YUV2RGB<float, scn, dcn, bidx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ YUV2RGBConvert_32F<bidx>(src, dst);
+ setAlpha(dst, ColorChannel<float>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ YUV2RGB() {}
+ __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {}
+ };
+
+ template <int bidx> struct YUV2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator ()(uint src) const
+ {
+ return YUV2RGBConvert_8UC4<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ YUV2RGB() {}
+ __host__ __device__ __forceinline__ YUV2RGB(const YUV2RGB&) {}
+ };
+ }
+
+#define OPENCV_GPU_IMPLEMENT_YUV2RGB_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::YUV2RGB<T, scn, dcn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+///////////////////////////////////// RGB <-> YCrCb //////////////////////////////////////
+
+ // RGB -> YCrCb conversion: per-pixel helpers plus the functor wrappers.
+ namespace color_detail
+ {
+ // Conversion coefficients in constant device memory. The int table is the
+ // fixed-point path (results are descaled with CV_DESCALE(..., yuv_shift));
+ // R2Y/G2Y/B2Y are defined outside this fragment.
+ __constant__ float c_RGB2YCrCbCoeffs_f[5] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f};
+ __constant__ int c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, 11682, 9241};
+
+ // Fixed-point conversion of one pixel; bidx == 0 means blue is in src.x
+ // (BGR order), otherwise blue is in src.z.
+ template <int bidx, typename T, typename D> static __device__ void RGB2YCrCbConvert(const T& src, D& dst)
+ {
+ // Chroma midpoint offset pre-scaled by 2^yuv_shift so it survives CV_DESCALE.
+ const int delta = ColorChannel<typename VecTraits<T>::elem_type>::half() * (1 << yuv_shift);
+
+ const int b = bidx == 0 ? src.x : src.z;
+ const int g = src.y;
+ const int r = bidx == 0 ? src.z : src.x;
+
+ const int Y = CV_DESCALE(b * c_RGB2YCrCbCoeffs_i[2] + g * c_RGB2YCrCbCoeffs_i[1] + r * c_RGB2YCrCbCoeffs_i[0], yuv_shift);
+ const int Cr = CV_DESCALE((r - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift);
+ const int Cb = CV_DESCALE((b - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift);
+
+ dst.x = saturate_cast<typename VecTraits<T>::elem_type>(Y);
+ dst.y = saturate_cast<typename VecTraits<T>::elem_type>(Cr);
+ dst.z = saturate_cast<typename VecTraits<T>::elem_type>(Cb);
+ }
+
+ // Same conversion on a packed 8UC4 pixel: channels live in byte lanes of a
+ // uint; bidx and bidx^2 pick the blue/red byte lanes.
+ template <int bidx> static __device__ uint RGB2YCrCbConvert_8UC4(uint src)
+ {
+ const int delta = ColorChannel<uchar>::half() * (1 << yuv_shift);
+
+ const int Y = CV_DESCALE((0xffu & src) * c_RGB2YCrCbCoeffs_i[bidx^2] + (0xffu & (src >> 8)) * c_RGB2YCrCbCoeffs_i[1] + (0xffu & (src >> 16)) * c_RGB2YCrCbCoeffs_i[bidx], yuv_shift);
+ const int Cr = CV_DESCALE(((0xffu & (src >> ((bidx ^ 2) * 8))) - Y) * c_RGB2YCrCbCoeffs_i[3] + delta, yuv_shift);
+ const int Cb = CV_DESCALE(((0xffu & (src >> (bidx * 8))) - Y) * c_RGB2YCrCbCoeffs_i[4] + delta, yuv_shift);
+
+ uint dst = 0;
+
+ dst |= saturate_cast<uchar>(Y);
+ dst |= saturate_cast<uchar>(Cr) << 8;
+ dst |= saturate_cast<uchar>(Cb) << 16;
+
+ return dst;
+ }
+
+ // Float conversion path; chroma is offset by the float channel midpoint.
+ template <int bidx, typename T, typename D> static __device__ __forceinline__ void RGB2YCrCbConvert_32F(const T& src, D& dst)
+ {
+ const float b = bidx == 0 ? src.x : src.z;
+ const float g = src.y;
+ const float r = bidx == 0 ? src.z : src.x;
+
+ dst.x = b * c_RGB2YCrCbCoeffs_f[2] + g * c_RGB2YCrCbCoeffs_f[1] + r * c_RGB2YCrCbCoeffs_f[0];
+ dst.y = (r - dst.x) * c_RGB2YCrCbCoeffs_f[3] + ColorChannel<float>::half();
+ dst.z = (b - dst.x) * c_RGB2YCrCbCoeffs_f[4] + ColorChannel<float>::half();
+ }
+
+ // Generic functor: routes through the fixed-point helper.
+ template <typename T, int scn, int dcn, int bidx> struct RGB2YCrCb
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+ RGB2YCrCbConvert<bidx>(src, dst);
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2YCrCb() {}
+ __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {}
+ };
+
+ // Float specialization: routes through the 32F helper.
+ template <int scn, int dcn, int bidx> struct RGB2YCrCb<float, scn, dcn, bidx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+ RGB2YCrCbConvert_32F<bidx>(src, dst);
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2YCrCb() {}
+ __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {}
+ };
+
+ // Packed 8UC4 specialization: one uint in, one uint out.
+ template <int bidx> struct RGB2YCrCb<uchar, 4, 4, bidx> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator ()(uint src) const
+ {
+ return RGB2YCrCbConvert_8UC4<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2YCrCb() {}
+ __host__ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb&) {}
+ };
+ }
+
+// Generates <name>_traits<T> whose create_functor() returns the matching
+// RGB2YCrCb functor.
+#define OPENCV_GPU_IMPLEMENT_RGB2YCrCb_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2YCrCb<T, scn, dcn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ // YCrCb -> RGB conversion helpers and functors (inverse of RGB2YCrCb above).
+ namespace color_detail
+ {
+ // NOTE(review): both arrays are declared [5] but list only 4 initializers;
+ // only indices 0..3 are read below, so the 5th element is zero and unused.
+ __constant__ float c_YCrCb2RGBCoeffs_f[5] = {1.403f, -0.714f, -0.344f, 1.773f};
+ __constant__ int c_YCrCb2RGBCoeffs_i[5] = {22987, -11698, -5636, 29049};
+
+ // Fixed-point conversion: src.x = Y, src.y = Cr, src.z = Cb; chroma is
+ // re-centered around the destination channel midpoint before descaling.
+ template <int bidx, typename T, typename D> static __device__ void YCrCb2RGBConvert(const T& src, D& dst)
+ {
+ const int b = src.x + CV_DESCALE((src.z - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift);
+ const int g = src.x + CV_DESCALE((src.z - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[2] +
+ (src.y - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift);
+ const int r = src.x + CV_DESCALE((src.y - ColorChannel<typename VecTraits<D>::elem_type>::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift);
+
+ // bidx == 0 writes blue to dst.x (BGR), otherwise to dst.z (RGB).
+ (bidx == 0 ? dst.x : dst.z) = saturate_cast<typename VecTraits<D>::elem_type>(b);
+ dst.y = saturate_cast<typename VecTraits<D>::elem_type>(g);
+ (bidx == 0 ? dst.z : dst.x) = saturate_cast<typename VecTraits<D>::elem_type>(r);
+ }
+
+ // Packed 8UC4 path: unpack Y/Cr/Cb from byte lanes, convert, repack with
+ // an opaque alpha byte (0xff) in the top lane.
+ template <int bidx> static __device__ uint YCrCb2RGBConvert_8UC4(uint src)
+ {
+ const int x = 0xff & (src);
+ const int y = 0xff & (src >> 8);
+ const int z = 0xff & (src >> 16);
+
+ const int b = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[3], yuv_shift);
+ const int g = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[2] + (y - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[1], yuv_shift);
+ const int r = x + CV_DESCALE((y - ColorChannel<uchar>::half()) * c_YCrCb2RGBCoeffs_i[0], yuv_shift);
+
+ uint dst = 0xffu << 24;
+
+ dst |= saturate_cast<uchar>(b) << (bidx * 8);
+ dst |= saturate_cast<uchar>(g) << 8;
+ dst |= saturate_cast<uchar>(r) << ((bidx ^ 2) * 8);
+
+ return dst;
+ }
+
+ // Float conversion path.
+ template <int bidx, typename T, typename D> __device__ __forceinline__ void YCrCb2RGBConvert_32F(const T& src, D& dst)
+ {
+ (bidx == 0 ? dst.x : dst.z) = src.x + (src.z - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[3];
+ dst.y = src.x + (src.z - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[2] + (src.y - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[1];
+ (bidx == 0 ? dst.z : dst.x) = src.x + (src.y - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[0];
+ }
+
+ // Generic functor (fixed-point path); alpha filled via setAlpha.
+ template <typename T, int scn, int dcn, int bidx> struct YCrCb2RGB
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ YCrCb2RGBConvert<bidx>(src, dst);
+ setAlpha(dst, ColorChannel<T>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ YCrCb2RGB() {}
+ __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {}
+ };
+
+ // Float specialization.
+ template <int scn, int dcn, int bidx> struct YCrCb2RGB<float, scn, dcn, bidx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ YCrCb2RGBConvert_32F<bidx>(src, dst);
+ setAlpha(dst, ColorChannel<float>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ YCrCb2RGB() {}
+ __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {}
+ };
+
+ // Packed 8UC4 specialization.
+ template <int bidx> struct YCrCb2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator ()(uint src) const
+ {
+ return YCrCb2RGBConvert_8UC4<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ YCrCb2RGB() {}
+ __host__ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB&) {}
+ };
+ }
+
+// Generates <name>_traits<T> whose create_functor() returns the matching
+// YCrCb2RGB functor.
+#define OPENCV_GPU_IMPLEMENT_YCrCb2RGB_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::YCrCb2RGB<T, scn, dcn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+////////////////////////////////////// RGB <-> XYZ ///////////////////////////////////////
+
+ // RGB -> CIE XYZ (D65 white point) conversion helpers and functors.
+ namespace color_detail
+ {
+ // 3x3 conversion matrices, row-major: float for the 32F path and
+ // fixed-point integers (descaled with xyz_shift) for the 8U path.
+ __constant__ float c_RGB2XYZ_D65f[9] = { 0.412453f, 0.357580f, 0.180423f, 0.212671f, 0.715160f, 0.072169f, 0.019334f, 0.119193f, 0.950227f };
+ __constant__ int c_RGB2XYZ_D65i[9] = { 1689, 1465, 739, 871, 2929, 296, 79, 488, 3892 };
+
+ // Fixed-point matrix multiply for one pixel.
+ template <int bidx, typename T, typename D> static __device__ __forceinline__ void RGB2XYZConvert_8U(const T& src, D& dst)
+ {
+ const uint b = bidx == 0 ? src.x : src.z;
+ const uint g = src.y;
+ const uint r = bidx == 0 ? src.z : src.x;
+
+ dst.x = saturate_cast<typename VecTraits<D>::elem_type>(CV_DESCALE(r * c_RGB2XYZ_D65i[0] + g * c_RGB2XYZ_D65i[1] + b * c_RGB2XYZ_D65i[2], xyz_shift));
+ dst.y = saturate_cast<typename VecTraits<D>::elem_type>(CV_DESCALE(r * c_RGB2XYZ_D65i[3] + g * c_RGB2XYZ_D65i[4] + b * c_RGB2XYZ_D65i[5], xyz_shift));
+ dst.z = saturate_cast<typename VecTraits<D>::elem_type>(CV_DESCALE(r * c_RGB2XYZ_D65i[6] + g * c_RGB2XYZ_D65i[7] + b * c_RGB2XYZ_D65i[8], xyz_shift));
+ }
+
+ // Packed 8UC4 path: byte lanes selected with bidx / bidx^2.
+ template <int bidx> static __device__ __forceinline__ uint RGB2XYZConvert_8UC4(uint src)
+ {
+ const uint b = 0xffu & (src >> (bidx * 8));
+ const uint g = 0xffu & (src >> 8);
+ const uint r = 0xffu & (src >> ((bidx ^ 2) * 8));
+
+ const uint x = saturate_cast<uchar>(CV_DESCALE(r * c_RGB2XYZ_D65i[0] + g * c_RGB2XYZ_D65i[1] + b * c_RGB2XYZ_D65i[2], xyz_shift));
+ const uint y = saturate_cast<uchar>(CV_DESCALE(r * c_RGB2XYZ_D65i[3] + g * c_RGB2XYZ_D65i[4] + b * c_RGB2XYZ_D65i[5], xyz_shift));
+ const uint z = saturate_cast<uchar>(CV_DESCALE(r * c_RGB2XYZ_D65i[6] + g * c_RGB2XYZ_D65i[7] + b * c_RGB2XYZ_D65i[8], xyz_shift));
+
+ uint dst = 0;
+
+ dst |= x;
+ dst |= y << 8;
+ dst |= z << 16;
+
+ return dst;
+ }
+
+ // Float matrix multiply for one pixel.
+ template <int bidx, typename T, typename D> static __device__ __forceinline__ void RGB2XYZConvert_32F(const T& src, D& dst)
+ {
+ const float b = bidx == 0 ? src.x : src.z;
+ const float g = src.y;
+ const float r = bidx == 0 ? src.z : src.x;
+
+ dst.x = r * c_RGB2XYZ_D65f[0] + g * c_RGB2XYZ_D65f[1] + b * c_RGB2XYZ_D65f[2];
+ dst.y = r * c_RGB2XYZ_D65f[3] + g * c_RGB2XYZ_D65f[4] + b * c_RGB2XYZ_D65f[5];
+ dst.z = r * c_RGB2XYZ_D65f[6] + g * c_RGB2XYZ_D65f[7] + b * c_RGB2XYZ_D65f[8];
+ }
+
+ // Generic functor (fixed-point 8U path).
+ template <typename T, int scn, int dcn, int bidx> struct RGB2XYZ
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ RGB2XYZConvert_8U<bidx>(src, dst);
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2XYZ() {}
+ __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {}
+ };
+
+ // Float specialization.
+ template <int scn, int dcn, int bidx> struct RGB2XYZ<float, scn, dcn, bidx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ RGB2XYZConvert_32F<bidx>(src, dst);
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2XYZ() {}
+ __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {}
+ };
+
+ // Packed 8UC4 specialization.
+ template <int bidx> struct RGB2XYZ<uchar, 4, 4, bidx> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator()(uint src) const
+ {
+ return RGB2XYZConvert_8UC4<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2XYZ() {}
+ __host__ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ&) {}
+ };
+ }
+
+// Generates <name>_traits<T> whose create_functor() returns the matching
+// RGB2XYZ functor.
+#define OPENCV_GPU_IMPLEMENT_RGB2XYZ_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2XYZ<T, scn, dcn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ // CIE XYZ -> sRGB (D65) conversion helpers and functors.
+ namespace color_detail
+ {
+ // Inverse 3x3 matrices, row-major: float and fixed-point (xyz_shift) forms.
+ __constant__ float c_XYZ2sRGB_D65f[9] = { 3.240479f, -1.53715f, -0.498535f, -0.969256f, 1.875991f, 0.041556f, 0.055648f, -0.204043f, 1.057311f };
+ __constant__ int c_XYZ2sRGB_D65i[9] = { 13273, -6296, -2042, -3970, 7684, 170, 228, -836, 4331 };
+
+ // Fixed-point inverse transform; bidx == 0 routes red to dst.z (BGR output).
+ template <int bidx, typename T, typename D> static __device__ __forceinline__ void XYZ2RGBConvert_8U(const T& src, D& dst)
+ {
+ (bidx == 0 ? dst.z : dst.x) = saturate_cast<typename VecTraits<D>::elem_type>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[0] + src.y * c_XYZ2sRGB_D65i[1] + src.z * c_XYZ2sRGB_D65i[2], xyz_shift));
+ dst.y = saturate_cast<typename VecTraits<D>::elem_type>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[3] + src.y * c_XYZ2sRGB_D65i[4] + src.z * c_XYZ2sRGB_D65i[5], xyz_shift));
+ (bidx == 0 ? dst.x : dst.z) = saturate_cast<typename VecTraits<D>::elem_type>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[6] + src.y * c_XYZ2sRGB_D65i[7] + src.z * c_XYZ2sRGB_D65i[8], xyz_shift));
+ }
+
+ // Packed 8UC4 path; output alpha byte forced to 0xff.
+ template <int bidx> static __device__ __forceinline__ uint XYZ2RGBConvert_8UC4(uint src)
+ {
+ const int x = 0xff & src;
+ const int y = 0xff & (src >> 8);
+ const int z = 0xff & (src >> 16);
+
+ const uint r = saturate_cast<uchar>(CV_DESCALE(x * c_XYZ2sRGB_D65i[0] + y * c_XYZ2sRGB_D65i[1] + z * c_XYZ2sRGB_D65i[2], xyz_shift));
+ const uint g = saturate_cast<uchar>(CV_DESCALE(x * c_XYZ2sRGB_D65i[3] + y * c_XYZ2sRGB_D65i[4] + z * c_XYZ2sRGB_D65i[5], xyz_shift));
+ const uint b = saturate_cast<uchar>(CV_DESCALE(x * c_XYZ2sRGB_D65i[6] + y * c_XYZ2sRGB_D65i[7] + z * c_XYZ2sRGB_D65i[8], xyz_shift));
+
+ uint dst = 0xffu << 24;
+
+ dst |= b << (bidx * 8);
+ dst |= g << 8;
+ dst |= r << ((bidx ^ 2) * 8);
+
+ return dst;
+ }
+
+ // Float inverse transform.
+ template <int bidx, typename T, typename D> static __device__ __forceinline__ void XYZ2RGBConvert_32F(const T& src, D& dst)
+ {
+ (bidx == 0 ? dst.z : dst.x) = src.x * c_XYZ2sRGB_D65f[0] + src.y * c_XYZ2sRGB_D65f[1] + src.z * c_XYZ2sRGB_D65f[2];
+ dst.y = src.x * c_XYZ2sRGB_D65f[3] + src.y * c_XYZ2sRGB_D65f[4] + src.z * c_XYZ2sRGB_D65f[5];
+ (bidx == 0 ? dst.x : dst.z) = src.x * c_XYZ2sRGB_D65f[6] + src.y * c_XYZ2sRGB_D65f[7] + src.z * c_XYZ2sRGB_D65f[8];
+ }
+
+ // Generic functor (fixed-point 8U path); alpha filled via setAlpha.
+ template <typename T, int scn, int dcn, int bidx> struct XYZ2RGB
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ XYZ2RGBConvert_8U<bidx>(src, dst);
+ setAlpha(dst, ColorChannel<T>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ XYZ2RGB() {}
+ __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {}
+ };
+
+ // Float specialization.
+ template <int scn, int dcn, int bidx> struct XYZ2RGB<float, scn, dcn, bidx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ XYZ2RGBConvert_32F<bidx>(src, dst);
+ setAlpha(dst, ColorChannel<float>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ XYZ2RGB() {}
+ __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {}
+ };
+
+ // Packed 8UC4 specialization.
+ template <int bidx> struct XYZ2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator()(uint src) const
+ {
+ return XYZ2RGBConvert_8UC4<bidx>(src);
+ }
+
+ __host__ __device__ __forceinline__ XYZ2RGB() {}
+ __host__ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB&) {}
+ };
+ }
+
+// Generates <name>_traits<T> whose create_functor() returns the matching
+// XYZ2RGB functor.
+#define OPENCV_GPU_IMPLEMENT_XYZ2RGB_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::XYZ2RGB<T, scn, dcn, bidx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+////////////////////////////////////// RGB <-> HSV ///////////////////////////////////////
+
+ namespace color_detail
+ {
+ __constant__ int c_HsvDivTable [256] = {0, 1044480, 522240, 348160, 261120, 208896, 174080, 149211, 130560, 116053, 104448, 94953, 87040, 80345, 74606, 69632, 65280, 61440, 58027, 54973, 52224, 49737, 47476, 45412, 43520, 41779, 40172, 38684, 37303, 36017, 34816, 33693, 32640, 31651, 30720, 29842, 29013, 28229, 27486, 26782, 26112, 25475, 24869, 24290, 23738, 23211, 22706, 22223, 21760, 21316, 20890, 20480, 20086, 19707, 19342, 18991, 18651, 18324, 18008, 17703, 17408, 17123, 16846, 16579, 16320, 16069, 15825, 15589, 15360, 15137, 14921, 14711, 14507, 14308, 14115, 13926, 13743, 13565, 13391, 13221, 13056, 12895, 12738, 12584, 12434, 12288, 12145, 12006, 11869, 11736, 11605, 11478, 11353, 11231, 11111, 10995, 10880, 10768, 10658, 10550, 10445, 10341, 10240, 10141, 10043, 9947, 9854, 9761, 9671, 9582, 9495, 9410, 9326, 9243, 9162, 9082, 9004, 8927, 8852, 8777, 8704, 8632, 8561, 8492, 8423, 8356, 8290, 8224, 8160, 8097, 8034, 7973, 7913, 7853, 7795, 7737, 7680, 7624, 7569, 7514, 7461, 7408, 7355, 7304, 7253, 7203, 7154, 7105, 7057, 7010, 6963, 6917, 6872, 6827, 6782, 6739, 6695, 6653, 6611, 6569, 6528, 6487, 6447, 6408, 6369, 6330, 6292, 6254, 6217, 6180, 6144, 6108, 6073, 6037, 6003, 5968, 5935, 5901, 5868, 5835, 5803, 5771, 5739, 5708, 5677, 5646, 5615, 5585, 5556, 5526, 5497, 5468, 5440, 5412, 5384, 5356, 5329, 5302, 5275, 5249, 5222, 5196, 5171, 5145, 5120, 5095, 5070, 5046, 5022, 4998, 4974, 4950, 4927, 4904, 4881, 4858, 4836, 4813, 4791, 4769, 4748, 4726, 4705, 4684, 4663, 4642, 4622, 4601, 4581, 4561, 4541, 4522, 4502, 4483, 4464, 4445, 4426, 4407, 4389, 4370, 4352, 4334, 4316, 4298, 4281, 4263, 4246, 4229, 4212, 4195, 4178, 4161, 4145, 4128, 4112, 4096};
+ __constant__ int c_HsvDivTable180[256] = {0, 122880, 61440, 40960, 30720, 24576, 20480, 17554, 15360, 13653, 12288, 11171, 10240, 9452, 8777, 8192, 7680, 7228, 6827, 6467, 6144, 5851, 5585, 5343, 5120, 4915, 4726, 4551, 4389, 4237, 4096, 3964, 3840, 3724, 3614, 3511, 3413, 3321, 3234, 3151, 3072, 2997, 2926, 2858, 2793, 2731, 2671, 2614, 2560, 2508, 2458, 2409, 2363, 2318, 2276, 2234, 2194, 2156, 2119, 2083, 2048, 2014, 1982, 1950, 1920, 1890, 1862, 1834, 1807, 1781, 1755, 1731, 1707, 1683, 1661, 1638, 1617, 1596, 1575, 1555, 1536, 1517, 1499, 1480, 1463, 1446, 1429, 1412, 1396, 1381, 1365, 1350, 1336, 1321, 1307, 1293, 1280, 1267, 1254, 1241, 1229, 1217, 1205, 1193, 1182, 1170, 1159, 1148, 1138, 1127, 1117, 1107, 1097, 1087, 1078, 1069, 1059, 1050, 1041, 1033, 1024, 1016, 1007, 999, 991, 983, 975, 968, 960, 953, 945, 938, 931, 924, 917, 910, 904, 897, 890, 884, 878, 871, 865, 859, 853, 847, 842, 836, 830, 825, 819, 814, 808, 803, 798, 793, 788, 783, 778, 773, 768, 763, 759, 754, 749, 745, 740, 736, 731, 727, 723, 719, 714, 710, 706, 702, 698, 694, 690, 686, 683, 679, 675, 671, 668, 664, 661, 657, 654, 650, 647, 643, 640, 637, 633, 630, 627, 624, 621, 617, 614, 611, 608, 605, 602, 599, 597, 594, 591, 588, 585, 582, 580, 577, 574, 572, 569, 566, 564, 561, 559, 556, 554, 551, 549, 546, 544, 541, 539, 537, 534, 532, 530, 527, 525, 523, 521, 518, 516, 514, 512, 510, 508, 506, 504, 502, 500, 497, 495, 493, 492, 490, 488, 486, 484, 482};
+ __constant__ int c_HsvDivTable256[256] = {0, 174763, 87381, 58254, 43691, 34953, 29127, 24966, 21845, 19418, 17476, 15888, 14564, 13443, 12483, 11651, 10923, 10280, 9709, 9198, 8738, 8322, 7944, 7598, 7282, 6991, 6722, 6473, 6242, 6026, 5825, 5638, 5461, 5296, 5140, 4993, 4855, 4723, 4599, 4481, 4369, 4263, 4161, 4064, 3972, 3884, 3799, 3718, 3641, 3567, 3495, 3427, 3361, 3297, 3236, 3178, 3121, 3066, 3013, 2962, 2913, 2865, 2819, 2774, 2731, 2689, 2648, 2608, 2570, 2533, 2497, 2461, 2427, 2394, 2362, 2330, 2300, 2270, 2241, 2212, 2185, 2158, 2131, 2106, 2081, 2056, 2032, 2009, 1986, 1964, 1942, 1920, 1900, 1879, 1859, 1840, 1820, 1802, 1783, 1765, 1748, 1730, 1713, 1697, 1680, 1664, 1649, 1633, 1618, 1603, 1589, 1574, 1560, 1547, 1533, 1520, 1507, 1494, 1481, 1469, 1456, 1444, 1432, 1421, 1409, 1398, 1387, 1376, 1365, 1355, 1344, 1334, 1324, 1314, 1304, 1295, 1285, 1276, 1266, 1257, 1248, 1239, 1231, 1222, 1214, 1205, 1197, 1189, 1181, 1173, 1165, 1157, 1150, 1142, 1135, 1128, 1120, 1113, 1106, 1099, 1092, 1085, 1079, 1072, 1066, 1059, 1053, 1046, 1040, 1034, 1028, 1022, 1016, 1010, 1004, 999, 993, 987, 982, 976, 971, 966, 960, 955, 950, 945, 940, 935, 930, 925, 920, 915, 910, 906, 901, 896, 892, 887, 883, 878, 874, 869, 865, 861, 857, 853, 848, 844, 840, 836, 832, 828, 824, 820, 817, 813, 809, 805, 802, 798, 794, 791, 787, 784, 780, 777, 773, 770, 767, 763, 760, 757, 753, 750, 747, 744, 741, 737, 734, 731, 728, 725, 722, 719, 716, 713, 710, 708, 705, 702, 699, 696, 694, 691, 688, 685};
+
+ // 8-bit RGB->HSV for one pixel. hr is the hue range (180 or 256) and picks
+ // the matching reciprocal table; bidx selects BGR (0) vs RGB input order.
+ template <int bidx, int hr, typename T, typename D> static __device__ void RGB2HSVConvert_8U(const T& src, D& dst)
+ {
+ const int hsv_shift = 12;
+ const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256;
+
+ int b = bidx == 0 ? src.x : src.z;
+ int g = src.y;
+ int r = bidx == 0 ? src.z : src.x;
+
+ int h, s, v = b;
+ int vmin = b, diff;
+ int vr, vg;
+
+ v = ::max(v, g);
+ v = ::max(v, r);
+ vmin = ::min(vmin, g);
+ vmin = ::min(vmin, r);
+
+ diff = v - vmin;
+ // vr/vg are all-ones masks (-1) when v == r (resp. v == g): used for a
+ // branchless selection of the hue sector formula below.
+ vr = (v == r) * -1;
+ vg = (v == g) * -1;
+
+ // Saturation and hue use reciprocal tables instead of division,
+ // with (1 << (hsv_shift-1)) as a rounding term.
+ s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift;
+ h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff))));
+ h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift;
+ h += (h < 0) * hr;
+
+ dst.x = saturate_cast<uchar>(h);
+ dst.y = (uchar)s;
+ dst.z = (uchar)v;
+ }
+
+ // Same 8-bit RGB->HSV on a packed 8UC4 pixel (one uint, one byte per
+ // channel); byte lanes picked with bidx / bidx^2.
+ template <int bidx, int hr> static __device__ uint RGB2HSVConvert_8UC4(uint src)
+ {
+ const int hsv_shift = 12;
+ const int* hdiv_table = hr == 180 ? c_HsvDivTable180 : c_HsvDivTable256;
+
+ const int b = 0xff & (src >> (bidx * 8));
+ const int g = 0xff & (src >> 8);
+ const int r = 0xff & (src >> ((bidx ^ 2) * 8));
+
+ int h, s, v = b;
+ int vmin = b, diff;
+ int vr, vg;
+
+ v = ::max(v, g);
+ v = ::max(v, r);
+ vmin = ::min(vmin, g);
+ vmin = ::min(vmin, r);
+
+ diff = v - vmin;
+ // All-ones masks for branchless hue-sector selection (see RGB2HSVConvert_8U).
+ vr = (v == r) * -1;
+ vg = (v == g) * -1;
+
+ s = (diff * c_HsvDivTable[v] + (1 << (hsv_shift-1))) >> hsv_shift;
+ h = (vr & (g - b)) + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff))));
+ h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift;
+ h += (h < 0) * hr;
+
+ uint dst = 0;
+
+ dst |= saturate_cast<uchar>(h);
+ dst |= (0xffu & s) << 8;
+ dst |= (0xffu & v) << 16;
+
+ return dst;
+ }
+
+ // Float RGB->HSV; hue is computed in degrees (0..360) then scaled to hr.
+ template <int bidx, int hr, typename T, typename D> static __device__ void RGB2HSVConvert_32F(const T& src, D& dst)
+ {
+ const float hscale = hr * (1.f / 360.f);
+
+ float b = bidx == 0 ? src.x : src.z;
+ float g = src.y;
+ float r = bidx == 0 ? src.z : src.x;
+
+ float h, s, v;
+
+ float vmin, diff;
+
+ v = vmin = r;
+ v = fmax(v, g);
+ v = fmax(v, b);
+ vmin = fmin(vmin, g);
+ vmin = fmin(vmin, b);
+
+ diff = v - vmin;
+ // epsilon guards against division by zero for gray pixels (diff == 0).
+ s = diff / (float)(::fabs(v) + numeric_limits<float>::epsilon());
+ diff = (float)(60. / (diff + numeric_limits<float>::epsilon()));
+
+ // Branchless sector selection: exactly one of the three products is nonzero.
+ h = (v == r) * (g - b) * diff;
+ h += (v != r && v == g) * ((b - r) * diff + 120.f);
+ h += (v != r && v != g) * ((r - g) * diff + 240.f);
+ h += (h < 0) * 360.f;
+
+ dst.x = h * hscale;
+ dst.y = s;
+ dst.z = v;
+ }
+
+ // Generic RGB2HSV functor: routes through the 8-bit helper.
+ template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HSV
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ RGB2HSVConvert_8U<bidx, hr>(src, dst);
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2HSV() {}
+ __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {}
+ };
+
+ // Float specialization: routes through the 32F helper.
+ template <int scn, int dcn, int bidx, int hr> struct RGB2HSV<float, scn, dcn, bidx, hr>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ RGB2HSVConvert_32F<bidx, hr>(src, dst);
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2HSV() {}
+ __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {}
+ };
+
+ // Packed 8UC4 specialization: one uint in, one uint out.
+ template <int bidx, int hr> struct RGB2HSV<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator()(uint src) const
+ {
+ return RGB2HSVConvert_8UC4<bidx, hr>(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2HSV() {}
+ __host__ __device__ __forceinline__ RGB2HSV(const RGB2HSV&) {}
+ };
+ }
+
+// Generates four traits: <name>_traits (hue range 180 for 8-bit, 360 for
+// float) and <name>_full_traits (hue range 256 for 8-bit). Both float
+// variants use 360 — full range and normal range coincide for float.
+#define OPENCV_GPU_IMPLEMENT_RGB2HSV_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HSV<T, scn, dcn, bidx, 180> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <typename T> struct name ## _full_traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HSV<T, scn, dcn, bidx, 256> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _full_traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HSV<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ // HSV -> RGB conversion helpers and functors.
+ namespace color_detail
+ {
+ // Per-sector indices into tab[] below, giving (b, g, r) for each of the
+ // six hue sectors.
+ __constant__ int c_HsvSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} };
+
+ // Float HSV->RGB; hr is the stored hue range (scaled back to 6 sectors).
+ template <int bidx, int hr, typename T, typename D> static __device__ void HSV2RGBConvert_32F(const T& src, D& dst)
+ {
+ const float hscale = 6.f / hr;
+
+ float h = src.x, s = src.y, v = src.z;
+ float b = v, g = v, r = v;
+
+ // s == 0 is pure gray: keep b = g = r = v.
+ if (s != 0)
+ {
+ h *= hscale;
+
+ // Wrap hue into [0, 6).
+ if( h < 0 )
+ do h += 6; while( h < 0 );
+ else if( h >= 6 )
+ do h -= 6; while( h >= 6 );
+
+ int sector = __float2int_rd(h);
+ h -= sector;
+
+ // Defensive clamp in case rounding pushed sector out of range.
+ if ( (unsigned)sector >= 6u )
+ {
+ sector = 0;
+ h = 0.f;
+ }
+
+ float tab[4];
+ tab[0] = v;
+ tab[1] = v * (1.f - s);
+ tab[2] = v * (1.f - s * h);
+ tab[3] = v * (1.f - s * (1.f - h));
+
+ b = tab[c_HsvSectorData[sector][0]];
+ g = tab[c_HsvSectorData[sector][1]];
+ r = tab[c_HsvSectorData[sector][2]];
+ }
+
+ dst.x = (bidx == 0 ? b : r);
+ dst.y = g;
+ dst.z = (bidx == 0 ? r : b);
+ }
+
+ // 8-bit path: normalize s/v to [0,1] floats, convert, rescale to 0..255.
+ // Hue (src.x) is passed through unscaled; the 32F helper divides by HR.
+ template <int bidx, int HR, typename T, typename D> static __device__ void HSV2RGBConvert_8U(const T& src, D& dst)
+ {
+ float3 buf;
+
+ buf.x = src.x;
+ buf.y = src.y * (1.f / 255.f);
+ buf.z = src.z * (1.f / 255.f);
+
+ HSV2RGBConvert_32F<bidx, HR>(buf, buf);
+
+ dst.x = saturate_cast<uchar>(buf.x * 255.f);
+ dst.y = saturate_cast<uchar>(buf.y * 255.f);
+ dst.z = saturate_cast<uchar>(buf.z * 255.f);
+ }
+
+ // Packed 8UC4 path; output alpha byte forced to 0xff.
+ template <int bidx, int hr> static __device__ uint HSV2RGBConvert_8UC4(uint src)
+ {
+ float3 buf;
+
+ buf.x = src & 0xff;
+ buf.y = ((src >> 8) & 0xff) * (1.f/255.f);
+ buf.z = ((src >> 16) & 0xff) * (1.f/255.f);
+
+ HSV2RGBConvert_32F<bidx, hr>(buf, buf);
+
+ uint dst = 0xffu << 24;
+
+ dst |= saturate_cast<uchar>(buf.x * 255.f);
+ dst |= saturate_cast<uchar>(buf.y * 255.f) << 8;
+ dst |= saturate_cast<uchar>(buf.z * 255.f) << 16;
+
+ return dst;
+ }
+
+ // Generic functor (8-bit path); alpha filled via setAlpha.
+ template <typename T, int scn, int dcn, int bidx, int hr> struct HSV2RGB
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ HSV2RGBConvert_8U<bidx, hr>(src, dst);
+ setAlpha(dst, ColorChannel<T>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ HSV2RGB() {}
+ __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {}
+ };
+
+ // Float specialization.
+ template <int scn, int dcn, int bidx, int hr> struct HSV2RGB<float, scn, dcn, bidx, hr>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ HSV2RGBConvert_32F<bidx, hr>(src, dst);
+ setAlpha(dst, ColorChannel<float>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ HSV2RGB() {}
+ __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {}
+ };
+
+ // Packed 8UC4 specialization.
+ template <int bidx, int hr> struct HSV2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator()(uint src) const
+ {
+ return HSV2RGBConvert_8UC4<bidx, hr>(src);
+ }
+
+ __host__ __device__ __forceinline__ HSV2RGB() {}
+ __host__ __device__ __forceinline__ HSV2RGB(const HSV2RGB&) {}
+ };
+ }
+
+// Generates <name>_traits and <name>_full_traits for HSV->RGB. Note the 8-bit
+// full-range variant uses hr = 255 here (vs 256 in the RGB2HSV traits) —
+// matches the hscale = 6/hr rescaling in HSV2RGBConvert_32F.
+#define OPENCV_GPU_IMPLEMENT_HSV2RGB_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::HSV2RGB<T, scn, dcn, bidx, 180> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <typename T> struct name ## _full_traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::HSV2RGB<T, scn, dcn, bidx, 255> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _full_traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::HSV2RGB<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+/////////////////////////////////////// RGB <-> HLS ////////////////////////////////////////
+
+ namespace color_detail
+ {
+ template <int bidx, int hr, typename T, typename D> static __device__ void RGB2HLSConvert_32F(const T& src, D& dst)
+ {
+ const float hscale = hr * (1.f / 360.f);
+
+ float b = bidx == 0 ? src.x : src.z;
+ float g = src.y;
+ float r = bidx == 0 ? src.z : src.x;
+
+ float h = 0.f, s = 0.f, l;
+ float vmin, vmax, diff;
+
+ vmax = vmin = r;
+ vmax = fmax(vmax, g);
+ vmax = fmax(vmax, b);
+ vmin = fmin(vmin, g);
+ vmin = fmin(vmin, b);
+
+ diff = vmax - vmin;
+ l = (vmax + vmin) * 0.5f;
+
+ if (diff > numeric_limits<float>::epsilon())
+ {
+ s = (l < 0.5f) * diff / (vmax + vmin);
+ s += (l >= 0.5f) * diff / (2.0f - vmax - vmin);
+
+ diff = 60.f / diff;
+
+ h = (vmax == r) * (g - b) * diff;
+ h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f);
+ h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f);
+ h += (h < 0.f) * 360.f;
+ }
+
+ dst.x = h * hscale;
+ dst.y = l;
+ dst.z = s;
+ }
+
+ template <int bidx, int hr, typename T, typename D> static __device__ void RGB2HLSConvert_8U(const T& src, D& dst)
+ {
+ float3 buf;
+
+ buf.x = src.x * (1.f / 255.f);
+ buf.y = src.y * (1.f / 255.f);
+ buf.z = src.z * (1.f / 255.f);
+
+ RGB2HLSConvert_32F<bidx, hr>(buf, buf);
+
+ dst.x = saturate_cast<uchar>(buf.x);
+ dst.y = saturate_cast<uchar>(buf.y*255.f);
+ dst.z = saturate_cast<uchar>(buf.z*255.f);
+ }
+
+ template <int bidx, int hr> static __device__ uint RGB2HLSConvert_8UC4(uint src)
+ {
+ float3 buf;
+
+ buf.x = (0xff & src) * (1.f / 255.f);
+ buf.y = (0xff & (src >> 8)) * (1.f / 255.f);
+ buf.z = (0xff & (src >> 16)) * (1.f / 255.f);
+
+ RGB2HLSConvert_32F<bidx, hr>(buf, buf);
+
+ uint dst = 0xffu << 24;
+
+ dst |= saturate_cast<uchar>(buf.x);
+ dst |= saturate_cast<uchar>(buf.y * 255.f) << 8;
+ dst |= saturate_cast<uchar>(buf.z * 255.f) << 16;
+
+ return dst;
+ }
+
+ // Functor used by the traits macro below; the generic version assumes 8-bit
+ // channels (float has its own specialization).
+ // NOTE(review): for dcn == 4 the alpha channel of dst is not written here —
+ // confirm callers only instantiate dcn == 3 or the packed uchar4 specialization.
+ template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HLS
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ RGB2HLSConvert_8U<bidx, hr>(src, dst);
+
+ return dst;
+ }
+
+ // Explicit trivial constructors keep the functor usable in both host and device code.
+ __host__ __device__ __forceinline__ RGB2HLS() {}
+ __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {}
+ };
+
+ // float specialization: channels are already in [0,1], so the float kernel is
+ // called directly without 8-bit normalisation/repacking.
+ template <int scn, int dcn, int bidx, int hr> struct RGB2HLS<float, scn, dcn, bidx, hr>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ RGB2HLSConvert_32F<bidx, hr>(src, dst);
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ RGB2HLS() {}
+ __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {}
+ };
+
+ // uchar 4-to-4 specialization: whole pixel packed in a single uint for
+ // coalesced 32-bit loads/stores.
+ template <int bidx, int hr> struct RGB2HLS<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator()(uint src) const
+ {
+ return RGB2HLSConvert_8UC4<bidx, hr>(src);
+ }
+
+ __host__ __device__ __forceinline__ RGB2HLS() {}
+ __host__ __device__ __forceinline__ RGB2HLS(const RGB2HLS&) {}
+ };
+ }
+
+// Generates <name>_traits and <name>_full_traits for an RGB -> HLS conversion.
+// Hue range per traits: 180 for 8-bit (fits a uchar), 256 for 8-bit full range,
+// 360 for float. NOTE(review): _full_traits<float> also uses 360 since float
+// hue naturally spans [0,360) — confirm this matches the CPU implementation.
+#define OPENCV_GPU_IMPLEMENT_RGB2HLS_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HLS<T, scn, dcn, bidx, 180> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <typename T> struct name ## _full_traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HLS<T, scn, dcn, bidx, 256> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _full_traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2HLS<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ namespace color_detail
+ {
+ // Per-sector permutation: for each of the 6 hue sectors, which tab[] entry
+ // (p2, p1, interp-down, interp-up) feeds b, g and r respectively.
+ __constant__ int c_HlsSectorData[6][3] = { {1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0} };
+
+ // Convert one HLS pixel (h in [0,hr], l and s in [0,1]) back to RGB in [0,1].
+ // bidx selects where blue lands in dst (0 -> dst.x, otherwise dst.z).
+ template <int bidx, int hr, typename T, typename D> static __device__ void HLS2RGBConvert_32F(const T& src, D& dst)
+ {
+ const float hscale = 6.0f / hr;
+
+ float h = src.x, l = src.y, s = src.z;
+ float b = l, g = l, r = l;   // s == 0 -> achromatic: every channel equals lightness
+
+ if (s != 0)
+ {
+ // p2/p1 are the upper/lower chroma bounds; branchless l <= 0.5 selection.
+ float p2 = (l <= 0.5f) * l * (1 + s);
+ p2 += (l > 0.5f) * (l + s - l * s);
+ float p1 = 2 * l - p2;
+
+ h *= hscale;   // map hue onto [0,6)
+
+ if( h < 0 )
+ do h += 6; while( h < 0 );
+ else if( h >= 6 )
+ do h -= 6; while( h >= 6 );
+
+ int sector;
+ sector = __float2int_rd(h);   // round toward -inf -> sector index 0..5
+
+ h -= sector;   // fractional position inside the sector
+
+ float tab[4];
+ tab[0] = p2;
+ tab[1] = p1;
+ tab[2] = p1 + (p2 - p1) * (1 - h);
+ tab[3] = p1 + (p2 - p1) * h;
+
+ b = tab[c_HlsSectorData[sector][0]];
+ g = tab[c_HlsSectorData[sector][1]];
+ r = tab[c_HlsSectorData[sector][2]];
+ }
+
+ dst.x = bidx == 0 ? b : r;
+ dst.y = g;
+ dst.z = bidx == 0 ? r : b;
+ }
+
+ // 8-bit entry point: hue is already in [0,hr] and passes through unscaled;
+ // lightness and saturation are normalised from [0,255] to [0,1] and the
+ // resulting RGB channels rescaled back to [0,255].
+ template <int bidx, int hr, typename T, typename D> static __device__ void HLS2RGBConvert_8U(const T& src, D& dst)
+ {
+ const float inv255 = 1.f / 255.f;
+
+ float3 tmp;
+ tmp.x = src.x;
+ tmp.y = src.y * inv255;
+ tmp.z = src.z * inv255;
+
+ HLS2RGBConvert_32F<bidx, hr>(tmp, tmp);
+
+ dst.x = saturate_cast<uchar>(tmp.x * 255.f);
+ dst.y = saturate_cast<uchar>(tmp.y * 255.f);
+ dst.z = saturate_cast<uchar>(tmp.z * 255.f);
+ }
+
+ // Packed-uchar4 (uint) variant: unpack the byte lanes, convert, repack with
+ // an opaque alpha in the top byte.
+ template <int bidx, int hr> static __device__ uint HLS2RGBConvert_8UC4(uint src)
+ {
+ const float inv255 = 1.f / 255.f;
+
+ float3 tmp;
+ tmp.x = src & 0xff;                     // hue stays in [0,hr]
+ tmp.y = ((src >> 8) & 0xff) * inv255;   // lightness -> [0,1]
+ tmp.z = ((src >> 16) & 0xff) * inv255;  // saturation -> [0,1]
+
+ HLS2RGBConvert_32F<bidx, hr>(tmp, tmp);
+
+ uint packed = 0xffu << 24;   // alpha = 255
+
+ packed |= saturate_cast<uchar>(tmp.x * 255.f);
+ packed |= saturate_cast<uchar>(tmp.y * 255.f) << 8;
+ packed |= saturate_cast<uchar>(tmp.z * 255.f) << 16;
+
+ return packed;
+ }
+
+ // Functor used by the traits macro below; the generic version assumes 8-bit
+ // channels (float has its own specialization). setAlpha — defined elsewhere —
+ // presumably fills the alpha channel when dcn == 4 and is a no-op otherwise;
+ // verify against its definition.
+ template <typename T, int scn, int dcn, int bidx, int hr> struct HLS2RGB
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const
+ {
+ typename TypeVec<T, dcn>::vec_type dst;
+
+ HLS2RGBConvert_8U<bidx, hr>(src, dst);
+ setAlpha(dst, ColorChannel<T>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ HLS2RGB() {}
+ __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {}
+ };
+
+ // float specialization: channels already in [0,1], float kernel called directly.
+ template <int scn, int dcn, int bidx, int hr> struct HLS2RGB<float, scn, dcn, bidx, hr>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ HLS2RGBConvert_32F<bidx, hr>(src, dst);
+ setAlpha(dst, ColorChannel<float>::max());
+
+ return dst;
+ }
+
+ __host__ __device__ __forceinline__ HLS2RGB() {}
+ __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {}
+ };
+
+ // uchar 4-to-4 specialization: whole pixel packed in a single uint; the packed
+ // converter writes alpha = 255 itself, so no setAlpha is needed here.
+ template <int bidx, int hr> struct HLS2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>
+ {
+ __device__ __forceinline__ uint operator()(uint src) const
+ {
+ return HLS2RGBConvert_8UC4<bidx, hr>(src);
+ }
+
+ __host__ __device__ __forceinline__ HLS2RGB() {}
+ __host__ __device__ __forceinline__ HLS2RGB(const HLS2RGB&) {}
+ };
+ }
+
+// Generates <name>_traits and <name>_full_traits for an HLS -> RGB conversion.
+// Hue range per traits: 180 for 8-bit, 255 for 8-bit full range, 360 for float.
+// NOTE(review): the 8-bit full-range constant is 255 here but 256 in the
+// forward RGB2HLS macro above — confirm this asymmetry is intentional (it
+// should mirror the CPU cvtColor implementation).
+#define OPENCV_GPU_IMPLEMENT_HLS2RGB_TRAITS(name, scn, dcn, bidx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::HLS2RGB<T, scn, dcn, bidx, 180> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <typename T> struct name ## _full_traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::HLS2RGB<T, scn, dcn, bidx, 255> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ }; \
+ template <> struct name ## _full_traits<float> \
+ { \
+ typedef ::cv::gpu::device::color_detail::HLS2RGB<float, scn, dcn, bidx, 360> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+///////////////////////////////////// RGB <-> Lab /////////////////////////////////////
+
+ namespace color_detail
+ {
+ // Fixed-point scaling parameters for the 8-bit RGB -> Lab path.
+ enum
+ {
+ LAB_CBRT_TAB_SIZE = 1024,
+ GAMMA_TAB_SIZE = 1024,
+ lab_shift = xyz_shift,   // reuse the XYZ fixed-point shift (defined elsewhere in this file)
+ gamma_shift = 3,
+ lab_shift2 = (lab_shift + gamma_shift),
+ LAB_CBRT_TAB_SIZE_B = (256 * 3 / 2 * (1 << gamma_shift))
+ };
+
+ // sRGB gamma-expansion (linearisation) lookup table for 8-bit input, with
+ // results scaled by 1 << gamma_shift (values span 0..2040 == 255 << 3).
+ __constant__ ushort c_sRGBGammaTab_b[] = {0,1,1,2,2,3,4,4,5,6,6,7,8,8,9,10,11,11,12,13,14,15,16,17,19,20,21,22,24,25,26,28,29,31,33,34,36,38,40,41,43,45,47,49,51,54,56,58,60,63,65,68,70,73,75,78,81,83,86,89,92,95,98,101,105,108,111,115,118,121,125,129,132,136,140,144,147,151,155,160,164,168,172,176,181,185,190,194,199,204,209,213,218,223,228,233,239,244,249,255,260,265,271,277,282,288,294,300,306,312,318,324,331,337,343,350,356,363,370,376,383,390,397,404,411,418,426,433,440,448,455,463,471,478,486,494,502,510,518,527,535,543,552,560,569,578,586,595,604,613,622,631,641,650,659,669,678,688,698,707,717,727,737,747,757,768,778,788,799,809,820,831,842,852,863,875,886,897,908,920,931,943,954,966,978,990,1002,1014,1026,1038,1050,1063,1075,1088,1101,1113,1126,1139,1152,1165,1178,1192,1205,1218,1232,1245,1259,1273,1287,1301,1315,1329,1343,1357,1372,1386,1401,1415,1430,1445,1460,1475,1490,1505,1521,1536,1551,1567,1583,1598,1614,1630,1646,1662,1678,1695,1711,1728,1744,1761,1778,1794,1811,1828,1846,1863,1880,1897,1915,1933,1950,1968,1986,2004,2022,2040};
+
+ // Fixed-point Lab "f" function: cube root of i / (255 << gamma_shift), scaled
+ // by 1 << lab_shift2. Below the CIE threshold 0.008856 the standard linear
+ // segment 7.787*x + 16/116 is used (0.13793103448275862 == 16/116).
+ __device__ __forceinline__ int LabCbrt_b(int i)
+ {
+ float x = i * (1.f / (255.f * (1 << gamma_shift)));
+ return (1 << lab_shift2) * (x < 0.008856f ? x * 7.787f + 0.13793103448275862f : ::cbrtf(x));
+ }
+
+ // Convert one 8-bit RGB pixel to CIE Lab using fixed-point arithmetic.
+ // srgb: gamma-expand channels through c_sRGBGammaTab_b; otherwise treat the
+ // input as already linear and shift left by gamma_shift to match the table's
+ // scale. blueIdx selects the blue-channel position in src.
+ // CV_DESCALE is a rounding right-shift macro defined elsewhere in this file.
+ template <bool srgb, int blueIdx, typename T, typename D>
+ __device__ __forceinline__ void RGB2LabConvert_8U(const T& src, D& dst)
+ {
+ // L = 116*fY - 16, rescaled to the 0..255 byte range with rounding (+50)/100.
+ const int Lscale = (116 * 255 + 50) / 100;
+ const int Lshift = -((16 * 255 * (1 << lab_shift2) + 50) / 100);
+
+ int B = blueIdx == 0 ? src.x : src.z;
+ int G = src.y;
+ int R = blueIdx == 0 ? src.z : src.x;
+
+ if (srgb)
+ {
+ B = c_sRGBGammaTab_b[B];
+ G = c_sRGBGammaTab_b[G];
+ R = c_sRGBGammaTab_b[R];
+ }
+ else
+ {
+ B <<= 3;   // == << gamma_shift, matching the table's scaling
+ G <<= 3;
+ R <<= 3;
+ }
+
+ // fX/fY/fZ: f(cube root) of the XYZ components. The integer weights are
+ // presumably the sRGB -> XYZ (D65) matrix rows scaled by 1 << lab_shift —
+ // TODO(review): confirm against the CPU cvtColor implementation.
+ int fX = LabCbrt_b(CV_DESCALE(B * 778 + G * 1541 + R * 1777, lab_shift));
+ int fY = LabCbrt_b(CV_DESCALE(B * 296 + G * 2929 + R * 871, lab_shift));
+ int fZ = LabCbrt_b(CV_DESCALE(B * 3575 + G * 448 + R * 73, lab_shift));
+
+ // a and b are offset by +128 so they fit an unsigned byte.
+ int L = CV_DESCALE(Lscale * fY + Lshift, lab_shift2);
+ int a = CV_DESCALE(500 * (fX - fY) + 128 * (1 << lab_shift2), lab_shift2);
+ int b = CV_DESCALE(200 * (fY - fZ) + 128 * (1 << lab_shift2), lab_shift2);
+
+ dst.x = saturate_cast<uchar>(L);
+ dst.y = saturate_cast<uchar>(a);
+ dst.z = saturate_cast<uchar>(b);
+ }
+
+ // Evaluate a piecewise cubic spline: tab holds 4 coefficients per segment,
+ // n segments total. The integer part of x picks the segment (clamped to
+ // [0, n-1]); the fractional part is the evaluation point within it.
+ __device__ __forceinline__ float splineInterpolate(float x, const float* tab, int n)
+ {
+ int seg = int(x);
+ seg = ::max(seg, 0);
+ seg = ::min(seg, n - 1);
+
+ const float t = x - seg;            // fractional offset inside the segment
+ const float* c = tab + seg * 4;     // this segment's 4 cubic coefficients
+
+ // Horner evaluation of c0 + c1*t + c2*t^2 + c3*t^3.
+ return ((c[3] * t + c[2]) * t + c[1]) * t + c[0];
+ }
+
+ __constant__ float c_sRGBGammaTab[] = {0,7.55853e-05,0.,-7.51331e-13,7.55853e-05,7.55853e-05,-2.25399e-12,3.75665e-12,0.000151171,7.55853e-05,9.01597e-12,-6.99932e-12,0.000226756,7.55853e-05,-1.1982e-11,2.41277e-12,0.000302341,7.55853e-05,-4.74369e-12,1.19001e-11,0.000377927,7.55853e-05,3.09568e-11,-2.09095e-11,0.000453512,7.55853e-05,-3.17718e-11,1.35303e-11,0.000529097,7.55853e-05,8.81905e-12,-4.10782e-12,0.000604683,7.55853e-05,-3.50439e-12,2.90097e-12,0.000680268,7.55853e-05,5.19852e-12,-7.49607e-12,0.000755853,7.55853e-05,-1.72897e-11,2.70833e-11,0.000831439,7.55854e-05,6.39602e-11,-4.26295e-11,0.000907024,7.55854e-05,-6.39282e-11,2.70193e-11,0.000982609,7.55853e-05,1.71298e-11,-7.24017e-12,0.00105819,7.55853e-05,-4.59077e-12,1.94137e-12,0.00113378,7.55853e-05,1.23333e-12,-5.25291e-13,0.00120937,7.55853e-05,-3.42545e-13,1.59799e-13,0.00128495,7.55853e-05,1.36852e-13,-1.13904e-13,0.00136054,7.55853e-05,-2.04861e-13,2.95818e-13,0.00143612,7.55853e-05,6.82594e-13,-1.06937e-12,0.00151171,7.55853e-05,-2.52551e-12,3.98166e-12,0.00158729,7.55853e-05,9.41946e-12,-1.48573e-11,0.00166288,7.55853e-05,-3.51523e-11,5.54474e-11,0.00173846,7.55854e-05,1.3119e-10,-9.0517e-11,0.00181405,7.55854e-05,-1.40361e-10,7.37899e-11,0.00188963,7.55853e-05,8.10085e-11,-8.82272e-11,0.00196522,7.55852e-05,-1.83673e-10,1.62704e-10,0.0020408,7.55853e-05,3.04438e-10,-2.13341e-10,0.00211639,7.55853e-05,-3.35586e-10,2.25e-10,0.00219197,7.55853e-05,3.39414e-10,-2.20997e-10,0.00226756,7.55853e-05,-3.23576e-10,1.93326e-10,0.00234315,7.55853e-05,2.564e-10,-8.66446e-11,0.00241873,7.55855e-05,-3.53328e-12,-7.9578e-11,0.00249432,7.55853e-05,-2.42267e-10,1.72126e-10,0.0025699,7.55853e-05,2.74111e-10,-1.43265e-10,0.00264549,7.55854e-05,-1.55683e-10,-6.47292e-11,0.00272107,7.55849e-05,-3.4987e-10,8.67842e-10,0.00279666,7.55868e-05,2.25366e-09,-3.8723e-09,0.00287224,7.55797e-05,-9.36325e-09,1.5087e-08,0.00294783,7.56063e-05,3.58978e-08,-5.69415e-08,0.00302341,7.55072e-05,-1.34927e-07,2.13144e-07,0.003099
,7.58768e-05,5.04507e-07,1.38713e-07,0.00317552,7.7302e-05,9.20646e-07,-1.55186e-07,0.00325359,7.86777e-05,4.55087e-07,4.26813e-08,0.00333276,7.97159e-05,5.83131e-07,-1.06495e-08,0.00341305,8.08502e-05,5.51182e-07,3.87467e-09,0.00349446,8.19642e-05,5.62806e-07,-1.92586e-10,0.00357698,8.30892e-05,5.62228e-07,1.0866e-09,0.00366063,8.4217e-05,5.65488e-07,5.02818e-10,0.00374542,8.53494e-05,5.66997e-07,8.60211e-10,0.00383133,8.6486e-05,5.69577e-07,7.13044e-10,0.00391839,8.76273e-05,5.71716e-07,4.78527e-10,0.00400659,8.87722e-05,5.73152e-07,1.09818e-09,0.00409594,8.99218e-05,5.76447e-07,2.50964e-10,0.00418644,9.10754e-05,5.772e-07,1.15762e-09,0.00427809,9.22333e-05,5.80672e-07,2.40865e-10,0.0043709,9.33954e-05,5.81395e-07,1.13854e-09,0.00446488,9.45616e-05,5.84811e-07,3.27267e-10,0.00456003,9.57322e-05,5.85792e-07,8.1197e-10,0.00465635,9.69062e-05,5.88228e-07,6.15823e-10,0.00475384,9.80845e-05,5.90076e-07,9.15747e-10,0.00485252,9.92674e-05,5.92823e-07,3.778e-10,0.00495238,0.000100454,5.93956e-07,8.32623e-10,0.00505343,0.000101645,5.96454e-07,4.82695e-10,0.00515567,0.000102839,5.97902e-07,9.61904e-10,0.00525911,0.000104038,6.00788e-07,3.26281e-10,0.00536375,0.00010524,6.01767e-07,9.926e-10,0.00546959,0.000106447,6.04745e-07,3.59933e-10,0.00557664,0.000107657,6.05824e-07,8.2728e-10,0.0056849,0.000108871,6.08306e-07,5.21898e-10,0.00579438,0.00011009,6.09872e-07,8.10492e-10,0.00590508,0.000111312,6.12303e-07,4.27046e-10,0.00601701,0.000112538,6.13585e-07,7.40878e-10,0.00613016,0.000113767,6.15807e-07,8.00469e-10,0.00624454,0.000115001,6.18209e-07,2.48178e-10,0.00636016,0.000116238,6.18953e-07,1.00073e-09,0.00647702,0.000117479,6.21955e-07,4.05654e-10,0.00659512,0.000118724,6.23172e-07,6.36192e-10,0.00671447,0.000119973,6.25081e-07,7.74927e-10,0.00683507,0.000121225,6.27406e-07,4.54975e-10,0.00695692,0.000122481,6.28771e-07,6.64841e-10,0.00708003,0.000123741,6.30765e-07,6.10972e-10,0.00720441,0.000125004,6.32598e-07,6.16543e-10,0.00733004,0.000126271,6.34448e-07,6.48204e-10,0.
00745695,0.000127542,6.36392e-07,5.15835e-10,0.00758513,0.000128816,6.3794e-07,5.48103e-10,0.00771458,0.000130094,6.39584e-07,1.01706e-09,0.00784532,0.000131376,6.42635e-07,4.0283e-11,0.00797734,0.000132661,6.42756e-07,6.84471e-10,0.00811064,0.000133949,6.4481e-07,9.47144e-10,0.00824524,0.000135241,6.47651e-07,1.83472e-10,0.00838112,0.000136537,6.48201e-07,1.11296e-09,0.00851831,0.000137837,6.5154e-07,2.13163e-11,0.0086568,0.00013914,6.51604e-07,6.64462e-10,0.00879659,0.000140445,6.53598e-07,1.04613e-09,0.00893769,0.000141756,6.56736e-07,-1.92377e-10,0.0090801,0.000143069,6.56159e-07,1.58601e-09,0.00922383,0.000144386,6.60917e-07,-5.63754e-10,0.00936888,0.000145706,6.59226e-07,1.60033e-09,0.00951524,0.000147029,6.64027e-07,-2.49543e-10,0.00966294,0.000148356,6.63278e-07,1.26043e-09,0.00981196,0.000149687,6.67059e-07,-1.35572e-10,0.00996231,0.00015102,6.66653e-07,1.14458e-09,0.010114,0.000152357,6.70086e-07,2.13864e-10,0.010267,0.000153698,6.70728e-07,7.93856e-10,0.0104214,0.000155042,6.73109e-07,3.36077e-10,0.0105771,0.000156389,6.74118e-07,6.55765e-10,0.0107342,0.000157739,6.76085e-07,7.66211e-10,0.0108926,0.000159094,6.78384e-07,4.66116e-12,0.0110524,0.000160451,6.78398e-07,1.07775e-09,0.0112135,0.000161811,6.81631e-07,3.41023e-10,0.011376,0.000163175,6.82654e-07,3.5205e-10,0.0115398,0.000164541,6.8371e-07,1.04473e-09,0.0117051,0.000165912,6.86844e-07,1.25757e-10,0.0118717,0.000167286,6.87222e-07,3.14818e-10,0.0120396,0.000168661,6.88166e-07,1.40886e-09,0.012209,0.000170042,6.92393e-07,-3.62244e-10,0.0123797,0.000171425,6.91306e-07,9.71397e-10,0.0125518,0.000172811,6.9422e-07,2.02003e-10,0.0127253,0.0001742,6.94826e-07,1.01448e-09,0.0129002,0.000175593,6.97869e-07,3.96653e-10,0.0130765,0.00017699,6.99059e-07,1.92927e-10,0.0132542,0.000178388,6.99638e-07,6.94305e-10,0.0134333,0.00017979,7.01721e-07,7.55108e-10,0.0136138,0.000181195,7.03986e-07,1.05918e-11,0.0137957,0.000182603,7.04018e-07,1.06513e-09,0.013979,0.000184015,7.07214e-07,3.85512e-10,0.0141637,0.00018543
,7.0837e-07,1.86769e-10,0.0143499,0.000186848,7.0893e-07,7.30116e-10,0.0145374,0.000188268,7.11121e-07,6.17983e-10,0.0147264,0.000189692,7.12975e-07,5.23282e-10,0.0149168,0.000191119,7.14545e-07,8.28398e-11,0.0151087,0.000192549,7.14793e-07,1.0081e-09,0.0153019,0.000193981,7.17817e-07,5.41244e-10,0.0154966,0.000195418,7.19441e-07,-3.7907e-10,0.0156928,0.000196856,7.18304e-07,1.90641e-09,0.0158903,0.000198298,7.24023e-07,-7.27387e-10,0.0160893,0.000199744,7.21841e-07,1.00317e-09,0.0162898,0.000201191,7.24851e-07,4.39949e-10,0.0164917,0.000202642,7.2617e-07,9.6234e-10,0.0166951,0.000204097,7.29057e-07,-5.64019e-10,0.0168999,0.000205554,7.27365e-07,1.29374e-09,0.0171062,0.000207012,7.31247e-07,9.77025e-10,0.017314,0.000208478,7.34178e-07,-1.47651e-09,0.0175232,0.000209942,7.29748e-07,3.06636e-09,0.0177338,0.00021141,7.38947e-07,-1.47573e-09,0.017946,0.000212884,7.3452e-07,9.7386e-10,0.0181596,0.000214356,7.37442e-07,1.30562e-09,0.0183747,0.000215835,7.41358e-07,-6.08376e-10,0.0185913,0.000217315,7.39533e-07,1.12785e-09,0.0188093,0.000218798,7.42917e-07,-1.77711e-10,0.0190289,0.000220283,7.42384e-07,1.44562e-09,0.0192499,0.000221772,7.46721e-07,-1.68825e-11,0.0194724,0.000223266,7.4667e-07,4.84533e-10,0.0196964,0.000224761,7.48124e-07,-5.85298e-11,0.0199219,0.000226257,7.47948e-07,1.61217e-09,0.0201489,0.000227757,7.52785e-07,-8.02136e-10,0.0203775,0.00022926,7.50378e-07,1.59637e-09,0.0206075,0.000230766,7.55167e-07,4.47168e-12,0.020839,0.000232276,7.55181e-07,2.48387e-10,0.021072,0.000233787,7.55926e-07,8.6474e-10,0.0213066,0.000235302,7.5852e-07,1.78299e-11,0.0215426,0.000236819,7.58573e-07,9.26567e-10,0.0217802,0.000238339,7.61353e-07,1.34529e-12,0.0220193,0.000239862,7.61357e-07,9.30659e-10,0.0222599,0.000241387,7.64149e-07,1.34529e-12,0.0225021,0.000242915,7.64153e-07,9.26567e-10,0.0227458,0.000244447,7.66933e-07,1.76215e-11,0.022991,0.00024598,7.66986e-07,8.65536e-10,0.0232377,0.000247517,7.69582e-07,2.45677e-10,0.023486,0.000249057,7.70319e-07,1.44193e-11,0.02373
58,0.000250598,7.70363e-07,1.55918e-09,0.0239872,0.000252143,7.7504e-07,-6.63173e-10,0.0242401,0.000253691,7.73051e-07,1.09357e-09,0.0244946,0.000255241,7.76331e-07,1.41919e-11,0.0247506,0.000256793,7.76374e-07,7.12248e-10,0.0250082,0.000258348,7.78511e-07,8.62049e-10,0.0252673,0.000259908,7.81097e-07,-4.35061e-10,0.025528,0.000261469,7.79792e-07,8.7825e-10,0.0257902,0.000263031,7.82426e-07,6.47181e-10,0.0260541,0.000264598,7.84368e-07,2.58448e-10,0.0263194,0.000266167,7.85143e-07,1.81558e-10,0.0265864,0.000267738,7.85688e-07,8.78041e-10,0.0268549,0.000269312,7.88322e-07,3.15102e-11,0.027125,0.000270889,7.88417e-07,8.58525e-10,0.0273967,0.000272468,7.90992e-07,2.59812e-10,0.02767,0.000274051,7.91772e-07,-3.5224e-11,0.0279448,0.000275634,7.91666e-07,1.74377e-09,0.0282212,0.000277223,7.96897e-07,-1.35196e-09,0.0284992,0.000278813,7.92841e-07,1.80141e-09,0.0287788,0.000280404,7.98246e-07,-2.65629e-10,0.0290601,0.000281999,7.97449e-07,1.12374e-09,0.0293428,0.000283598,8.0082e-07,-5.04106e-10,0.0296272,0.000285198,7.99308e-07,8.92764e-10,0.0299132,0.000286799,8.01986e-07,6.58379e-10,0.0302008,0.000288405,8.03961e-07,1.98971e-10,0.0304901,0.000290014,8.04558e-07,4.08382e-10,0.0307809,0.000291624,8.05783e-07,3.01839e-11,0.0310733,0.000293236,8.05874e-07,1.33343e-09,0.0313673,0.000294851,8.09874e-07,2.2419e-10,0.031663,0.000296472,8.10547e-07,-3.67606e-10,0.0319603,0.000298092,8.09444e-07,1.24624e-09,0.0322592,0.000299714,8.13182e-07,-8.92025e-10,0.0325597,0.000301338,8.10506e-07,2.32183e-09,0.0328619,0.000302966,8.17472e-07,-9.44719e-10,0.0331657,0.000304598,8.14638e-07,1.45703e-09,0.0334711,0.000306232,8.19009e-07,-1.15805e-09,0.0337781,0.000307866,8.15535e-07,3.17507e-09,0.0340868,0.000309507,8.2506e-07,-4.09161e-09,0.0343971,0.000311145,8.12785e-07,5.74079e-09,0.0347091,0.000312788,8.30007e-07,-3.97034e-09,0.0350227,0.000314436,8.18096e-07,2.68985e-09,0.035338,0.00031608,8.26166e-07,6.61676e-10,0.0356549,0.000317734,8.28151e-07,-1.61123e-09,0.0359734,0.000319386,8.23317
e-07,2.05786e-09,0.0362936,0.000321038,8.29491e-07,8.30388e-10,0.0366155,0.0003227,8.31982e-07,-1.65424e-09,0.036939,0.000324359,8.27019e-07,2.06129e-09,0.0372642,0.000326019,8.33203e-07,8.59719e-10,0.0375911,0.000327688,8.35782e-07,-1.77488e-09,0.0379196,0.000329354,8.30458e-07,2.51464e-09,0.0382498,0.000331023,8.38002e-07,-8.33135e-10,0.0385817,0.000332696,8.35502e-07,8.17825e-10,0.0389152,0.00033437,8.37956e-07,1.28718e-09,0.0392504,0.00033605,8.41817e-07,-2.2413e-09,0.0395873,0.000337727,8.35093e-07,3.95265e-09,0.0399258,0.000339409,8.46951e-07,-2.39332e-09,0.0402661,0.000341095,8.39771e-07,1.89533e-09,0.040608,0.000342781,8.45457e-07,-1.46271e-09,0.0409517,0.000344467,8.41069e-07,3.95554e-09,0.041297,0.000346161,8.52936e-07,-3.18369e-09,0.041644,0.000347857,8.43385e-07,1.32873e-09,0.0419927,0.000349548,8.47371e-07,1.59402e-09,0.0423431,0.000351248,8.52153e-07,-2.54336e-10,0.0426952,0.000352951,8.5139e-07,-5.76676e-10,0.043049,0.000354652,8.4966e-07,2.56114e-09,0.0434045,0.000356359,8.57343e-07,-2.21744e-09,0.0437617,0.000358067,8.50691e-07,2.58344e-09,0.0441206,0.000359776,8.58441e-07,-6.65826e-10,0.0444813,0.000361491,8.56444e-07,7.99218e-11,0.0448436,0.000363204,8.56684e-07,3.46063e-10,0.0452077,0.000364919,8.57722e-07,2.26116e-09,0.0455734,0.000366641,8.64505e-07,-1.94005e-09,0.045941,0.000368364,8.58685e-07,1.77384e-09,0.0463102,0.000370087,8.64007e-07,-1.43005e-09,0.0466811,0.000371811,8.59717e-07,3.94634e-09,0.0470538,0.000373542,8.71556e-07,-3.17946e-09,0.0474282,0.000375276,8.62017e-07,1.32104e-09,0.0478043,0.000377003,8.6598e-07,1.62045e-09,0.0481822,0.00037874,8.70842e-07,-3.52297e-10,0.0485618,0.000380481,8.69785e-07,-2.11211e-10,0.0489432,0.00038222,8.69151e-07,1.19716e-09,0.0493263,0.000383962,8.72743e-07,-8.52026e-10,0.0497111,0.000385705,8.70187e-07,2.21092e-09,0.0500977,0.000387452,8.76819e-07,-5.41339e-10,0.050486,0.000389204,8.75195e-07,-4.5361e-11,0.0508761,0.000390954,8.75059e-07,7.22669e-10,0.0512679,0.000392706,8.77227e-07,8.79936e-10,0.05
16615,0.000394463,8.79867e-07,-5.17048e-10,0.0520568,0.000396222,8.78316e-07,1.18833e-09,0.0524539,0.000397982,8.81881e-07,-5.11022e-10,0.0528528,0.000399744,8.80348e-07,8.55683e-10,0.0532534,0.000401507,8.82915e-07,8.13562e-10,0.0536558,0.000403276,8.85356e-07,-3.84603e-10,0.05406,0.000405045,8.84202e-07,7.24962e-10,0.0544659,0.000406816,8.86377e-07,1.20986e-09,0.0548736,0.000408592,8.90006e-07,-1.83896e-09,0.0552831,0.000410367,8.84489e-07,2.42071e-09,0.0556944,0.000412143,8.91751e-07,-3.93413e-10,0.0561074,0.000413925,8.90571e-07,-8.46967e-10,0.0565222,0.000415704,8.8803e-07,3.78122e-09,0.0569388,0.000417491,8.99374e-07,-3.1021e-09,0.0573572,0.000419281,8.90068e-07,1.17658e-09,0.0577774,0.000421064,8.93597e-07,2.12117e-09,0.0581993,0.000422858,8.99961e-07,-2.21068e-09,0.0586231,0.000424651,8.93329e-07,2.9961e-09,0.0590486,0.000426447,9.02317e-07,-2.32311e-09,0.059476,0.000428244,8.95348e-07,2.57122e-09,0.0599051,0.000430043,9.03062e-07,-5.11098e-10,0.0603361,0.000431847,9.01528e-07,-5.27166e-10,0.0607688,0.000433649,8.99947e-07,2.61984e-09,0.0612034,0.000435457,9.07806e-07,-2.50141e-09,0.0616397,0.000437265,9.00302e-07,3.66045e-09,0.0620779,0.000439076,9.11283e-07,-4.68977e-09,0.0625179,0.000440885,8.97214e-07,7.64783e-09,0.0629597,0.000442702,9.20158e-07,-7.27499e-09,0.0634033,0.000444521,8.98333e-07,6.55113e-09,0.0638487,0.000446337,9.17986e-07,-4.02844e-09,0.0642959,0.000448161,9.05901e-07,2.11196e-09,0.064745,0.000449979,9.12236e-07,3.03125e-09,0.0651959,0.000451813,9.2133e-07,-6.78648e-09,0.0656486,0.000453635,9.00971e-07,9.21375e-09,0.0661032,0.000455464,9.28612e-07,-7.71684e-09,0.0665596,0.000457299,9.05462e-07,6.7522e-09,0.0670178,0.00045913,9.25718e-07,-4.3907e-09,0.0674778,0.000460968,9.12546e-07,3.36e-09,0.0679397,0.000462803,9.22626e-07,-1.59876e-09,0.0684034,0.000464644,9.1783e-07,3.0351e-09,0.068869,0.000466488,9.26935e-07,-3.09101e-09,0.0693364,0.000468333,9.17662e-07,1.8785e-09,0.0698057,0.000470174,9.23298e-07,3.02733e-09,0.0702768,0.00047203,9.3
238e-07,-6.53722e-09,0.0707497,0.000473875,9.12768e-07,8.22054e-09,0.0712245,0.000475725,9.37429e-07,-3.99325e-09,0.0717012,0.000477588,9.2545e-07,3.01839e-10,0.0721797,0.00047944,9.26355e-07,2.78597e-09,0.0726601,0.000481301,9.34713e-07,-3.99507e-09,0.0731423,0.000483158,9.22728e-07,5.7435e-09,0.0736264,0.000485021,9.39958e-07,-4.07776e-09,0.0741123,0.000486888,9.27725e-07,3.11695e-09,0.0746002,0.000488753,9.37076e-07,-9.39394e-10,0.0750898,0.000490625,9.34258e-07,6.4055e-10,0.0755814,0.000492495,9.3618e-07,-1.62265e-09,0.0760748,0.000494363,9.31312e-07,5.84995e-09,0.0765701,0.000496243,9.48861e-07,-6.87601e-09,0.0770673,0.00049812,9.28233e-07,6.75296e-09,0.0775664,0.000499997,9.48492e-07,-5.23467e-09,0.0780673,0.000501878,9.32788e-07,6.73523e-09,0.0785701,0.000503764,9.52994e-07,-6.80514e-09,0.0790748,0.000505649,9.32578e-07,5.5842e-09,0.0795814,0.000507531,9.49331e-07,-6.30583e-10,0.0800899,0.000509428,9.47439e-07,-3.0618e-09,0.0806003,0.000511314,9.38254e-07,5.4273e-09,0.0811125,0.000513206,9.54536e-07,-3.74627e-09,0.0816267,0.000515104,9.43297e-07,2.10713e-09,0.0821427,0.000516997,9.49618e-07,2.76839e-09,0.0826607,0.000518905,9.57924e-07,-5.73006e-09,0.0831805,0.000520803,9.40733e-07,5.25072e-09,0.0837023,0.0005227,9.56486e-07,-3.71718e-10,0.084226,0.000524612,9.5537e-07,-3.76404e-09,0.0847515,0.000526512,9.44078e-07,7.97735e-09,0.085279,0.000528424,9.6801e-07,-5.79367e-09,0.0858084,0.000530343,9.50629e-07,2.96268e-10,0.0863397,0.000532245,9.51518e-07,4.6086e-09,0.0868729,0.000534162,9.65344e-07,-3.82947e-09,0.087408,0.000536081,9.53856e-07,3.25861e-09,0.087945,0.000537998,9.63631e-07,-1.7543e-09,0.088484,0.00053992,9.58368e-07,3.75849e-09,0.0890249,0.000541848,9.69644e-07,-5.82891e-09,0.0895677,0.00054377,9.52157e-07,4.65593e-09,0.0901124,0.000545688,9.66125e-07,2.10643e-09,0.0906591,0.000547627,9.72444e-07,-5.63099e-09,0.0912077,0.000549555,9.55551e-07,5.51627e-09,0.0917582,0.000551483,9.721e-07,-1.53292e-09,0.0923106,0.000553422,9.67501e-07,6.15311e-10,0.092
865,0.000555359,9.69347e-07,-9.28291e-10,0.0934213,0.000557295,9.66562e-07,3.09774e-09,0.0939796,0.000559237,9.75856e-07,-4.01186e-09,0.0945398,0.000561177,9.6382e-07,5.49892e-09,0.095102,0.000563121,9.80317e-07,-3.08258e-09,0.0956661,0.000565073,9.71069e-07,-6.19176e-10,0.0962321,0.000567013,9.69212e-07,5.55932e-09,0.0968001,0.000568968,9.8589e-07,-6.71704e-09,0.09737,0.00057092,9.65738e-07,6.40762e-09,0.0979419,0.00057287,9.84961e-07,-4.0122e-09,0.0985158,0.000574828,9.72925e-07,2.19059e-09,0.0990916,0.000576781,9.79496e-07,2.70048e-09,0.0996693,0.000578748,9.87598e-07,-5.54193e-09,0.100249,0.000580706,9.70972e-07,4.56597e-09,0.100831,0.000582662,9.8467e-07,2.17923e-09,0.101414,0.000584638,9.91208e-07,-5.83232e-09,0.102,0.000586603,9.73711e-07,6.24884e-09,0.102588,0.000588569,9.92457e-07,-4.26178e-09,0.103177,0.000590541,9.79672e-07,3.34781e-09,0.103769,0.00059251,9.89715e-07,-1.67904e-09,0.104362,0.000594485,9.84678e-07,3.36839e-09,0.104958,0.000596464,9.94783e-07,-4.34397e-09,0.105555,0.000598441,9.81751e-07,6.55696e-09,0.106155,0.000600424,1.00142e-06,-6.98272e-09,0.106756,0.000602406,9.80474e-07,6.4728e-09,0.107359,0.000604386,9.99893e-07,-4.00742e-09,0.107965,0.000606374,9.8787e-07,2.10654e-09,0.108572,0.000608356,9.9419e-07,3.0318e-09,0.109181,0.000610353,1.00329e-06,-6.7832e-09,0.109793,0.00061234,9.82936e-07,9.1998e-09,0.110406,0.000614333,1.01054e-06,-7.6642e-09,0.111021,0.000616331,9.87543e-07,6.55579e-09,0.111639,0.000618326,1.00721e-06,-3.65791e-09,0.112258,0.000620329,9.96236e-07,6.25467e-10,0.112879,0.000622324,9.98113e-07,1.15593e-09,0.113503,0.000624323,1.00158e-06,2.20158e-09,0.114128,0.000626333,1.00819e-06,-2.51191e-09,0.114755,0.000628342,1.00065e-06,3.95517e-10,0.115385,0.000630345,1.00184e-06,9.29807e-10,0.116016,0.000632351,1.00463e-06,3.33599e-09,0.116649,0.00063437,1.01463e-06,-6.82329e-09,0.117285,0.000636379,9.94163e-07,9.05595e-09,0.117922,0.000638395,1.02133e-06,-7.04862e-09,0.118562,0.000640416,1.00019e-06,4.23737e-09,0.119203,0.00064
2429,1.0129e-06,-2.45033e-09,0.119847,0.000644448,1.00555e-06,5.56395e-09,0.120492,0.000646475,1.02224e-06,-4.9043e-09,0.121139,0.000648505,1.00753e-06,-8.47952e-10,0.121789,0.000650518,1.00498e-06,8.29622e-09,0.122441,0.000652553,1.02987e-06,-9.98538e-09,0.123094,0.000654582,9.99914e-07,9.2936e-09,0.12375,0.00065661,1.02779e-06,-4.83707e-09,0.124407,0.000658651,1.01328e-06,2.60411e-09,0.125067,0.000660685,1.0211e-06,-5.57945e-09,0.125729,0.000662711,1.00436e-06,1.22631e-08,0.126392,0.000664756,1.04115e-06,-1.36704e-08,0.127058,0.000666798,1.00014e-06,1.26161e-08,0.127726,0.000668836,1.03798e-06,-6.99155e-09,0.128396,0.000670891,1.01701e-06,4.48836e-10,0.129068,0.000672926,1.01836e-06,5.19606e-09,0.129742,0.000674978,1.03394e-06,-6.3319e-09,0.130418,0.000677027,1.01495e-06,5.2305e-09,0.131096,0.000679073,1.03064e-06,3.11123e-10,0.131776,0.000681135,1.03157e-06,-6.47511e-09,0.132458,0.000683179,1.01215e-06,1.06882e-08,0.133142,0.000685235,1.04421e-06,-6.47519e-09,0.133829,0.000687304,1.02479e-06,3.11237e-10,0.134517,0.000689355,1.02572e-06,5.23035e-09,0.135207,0.000691422,1.04141e-06,-6.3316e-09,0.1359,0.000693486,1.02242e-06,5.19484e-09,0.136594,0.000695546,1.038e-06,4.53497e-10,0.137291,0.000697623,1.03936e-06,-7.00891e-09,0.137989,0.000699681,1.01834e-06,1.2681e-08,0.13869,0.000701756,1.05638e-06,-1.39128e-08,0.139393,0.000703827,1.01464e-06,1.31679e-08,0.140098,0.000705896,1.05414e-06,-8.95659e-09,0.140805,0.000707977,1.02727e-06,7.75742e-09,0.141514,0.000710055,1.05055e-06,-7.17182e-09,0.142225,0.000712135,1.02903e-06,6.02862e-09,0.142938,0.000714211,1.04712e-06,-2.04163e-09,0.143653,0.000716299,1.04099e-06,2.13792e-09,0.144371,0.000718387,1.04741e-06,-6.51009e-09,0.14509,0.000720462,1.02787e-06,9.00123e-09,0.145812,0.000722545,1.05488e-06,3.07523e-10,0.146535,0.000724656,1.0558e-06,-1.02312e-08,0.147261,0.000726737,1.02511e-06,1.0815e-08,0.147989,0.000728819,1.05755e-06,-3.22681e-09,0.148719,0.000730925,1.04787e-06,2.09244e-09,0.14945,0.000733027,1.05415e-06,-5
.143e-09,0.150185,0.00073512,1.03872e-06,3.57844e-09,0.150921,0.000737208,1.04946e-06,5.73027e-09,0.151659,0.000739324,1.06665e-06,-1.15983e-08,0.152399,0.000741423,1.03185e-06,1.08605e-08,0.153142,0.000743519,1.06443e-06,-2.04106e-09,0.153886,0.000745642,1.05831e-06,-2.69642e-09,0.154633,0.00074775,1.05022e-06,-2.07425e-09,0.155382,0.000749844,1.044e-06,1.09934e-08,0.156133,0.000751965,1.07698e-06,-1.20972e-08,0.156886,0.000754083,1.04069e-06,7.59288e-09,0.157641,0.000756187,1.06347e-06,-3.37305e-09,0.158398,0.000758304,1.05335e-06,5.89921e-09,0.159158,0.000760428,1.07104e-06,-5.32248e-09,0.159919,0.000762554,1.05508e-06,4.8927e-10,0.160683,0.000764666,1.05654e-06,3.36547e-09,0.161448,0.000766789,1.06664e-06,9.50081e-10,0.162216,0.000768925,1.06949e-06,-7.16568e-09,0.162986,0.000771043,1.04799e-06,1.28114e-08,0.163758,0.000773177,1.08643e-06,-1.42774e-08,0.164533,0.000775307,1.0436e-06,1.44956e-08,0.165309,0.000777438,1.08708e-06,-1.39025e-08,0.166087,0.00077957,1.04538e-06,1.13118e-08,0.166868,0.000781695,1.07931e-06,-1.54224e-09,0.167651,0.000783849,1.07468e-06,-5.14312e-09,0.168436,0.000785983,1.05925e-06,7.21381e-09,0.169223,0.000788123,1.0809e-06,-8.81096e-09,0.170012,0.000790259,1.05446e-06,1.31289e-08,0.170803,0.000792407,1.09385e-06,-1.39022e-08,0.171597,0.000794553,1.05214e-06,1.26775e-08,0.172392,0.000796695,1.09018e-06,-7.00557e-09,0.17319,0.000798855,1.06916e-06,4.43796e-10,0.17399,0.000800994,1.07049e-06,5.23031e-09,0.174792,0.000803151,1.08618e-06,-6.46397e-09,0.175596,0.000805304,1.06679e-06,5.72444e-09,0.176403,0.000807455,1.08396e-06,-1.53254e-09,0.177211,0.000809618,1.07937e-06,4.05673e-10,0.178022,0.000811778,1.08058e-06,-9.01916e-11,0.178835,0.000813939,1.08031e-06,-4.49821e-11,0.17965,0.000816099,1.08018e-06,2.70234e-10,0.180467,0.00081826,1.08099e-06,-1.03603e-09,0.181286,0.000820419,1.07788e-06,3.87392e-09,0.182108,0.000822587,1.0895e-06,4.41522e-10,0.182932,0.000824767,1.09083e-06,-5.63997e-09,0.183758,0.000826932,1.07391e-06,7.21707e-09,0.1
84586,0.000829101,1.09556e-06,-8.32718e-09,0.185416,0.000831267,1.07058e-06,1.11907e-08,0.186248,0.000833442,1.10415e-06,-6.63336e-09,0.187083,0.00083563,1.08425e-06,4.41484e-10,0.187919,0.0008378,1.08557e-06,4.86754e-09,0.188758,0.000839986,1.10017e-06,-5.01041e-09,0.189599,0.000842171,1.08514e-06,2.72811e-10,0.190443,0.000844342,1.08596e-06,3.91916e-09,0.191288,0.000846526,1.09772e-06,-1.04819e-09,0.192136,0.000848718,1.09457e-06,2.73531e-10,0.192985,0.000850908,1.0954e-06,-4.58916e-11,0.193837,0.000853099,1.09526e-06,-9.01158e-11,0.194692,0.000855289,1.09499e-06,4.06506e-10,0.195548,0.00085748,1.09621e-06,-1.53595e-09,0.196407,0.000859668,1.0916e-06,5.73717e-09,0.197267,0.000861869,1.10881e-06,-6.51164e-09,0.19813,0.000864067,1.08928e-06,5.40831e-09,0.198995,0.000866261,1.1055e-06,-2.20401e-10,0.199863,0.000868472,1.10484e-06,-4.52652e-09,0.200732,0.000870668,1.09126e-06,3.42508e-09,0.201604,0.000872861,1.10153e-06,5.72762e-09,0.202478,0.000875081,1.11872e-06,-1.14344e-08,0.203354,0.000877284,1.08441e-06,1.02076e-08,0.204233,0.000879484,1.11504e-06,4.06355e-10,0.205113,0.000881715,1.11626e-06,-1.18329e-08,0.205996,0.000883912,1.08076e-06,1.71227e-08,0.206881,0.000886125,1.13213e-06,-1.19546e-08,0.207768,0.000888353,1.09626e-06,8.93465e-10,0.208658,0.000890548,1.09894e-06,8.38062e-09,0.209549,0.000892771,1.12408e-06,-4.61353e-09,0.210443,0.000895006,1.11024e-06,-4.82756e-09,0.211339,0.000897212,1.09576e-06,9.02245e-09,0.212238,0.00089943,1.12283e-06,-1.45997e-09,0.213138,0.000901672,1.11845e-06,-3.18255e-09,0.214041,0.000903899,1.1089e-06,-7.11073e-10,0.214946,0.000906115,1.10677e-06,6.02692e-09,0.215853,0.000908346,1.12485e-06,-8.49548e-09,0.216763,0.00091057,1.09936e-06,1.30537e-08,0.217675,0.000912808,1.13852e-06,-1.3917e-08,0.218588,0.000915044,1.09677e-06,1.28121e-08,0.219505,0.000917276,1.13521e-06,-7.5288e-09,0.220423,0.000919523,1.11262e-06,2.40205e-09,0.221344,0.000921756,1.11983e-06,-2.07941e-09,0.222267,0.000923989,1.11359e-06,5.91551e-09,0.223192,0.000
926234,1.13134e-06,-6.68149e-09,0.224119,0.000928477,1.11129e-06,5.90929e-09,0.225049,0.000930717,1.12902e-06,-2.05436e-09,0.22598,0.000932969,1.12286e-06,2.30807e-09,0.226915,0.000935222,1.12978e-06,-7.17796e-09,0.227851,0.00093746,1.10825e-06,1.15028e-08,0.228789,0.000939711,1.14276e-06,-9.03083e-09,0.22973,0.000941969,1.11566e-06,9.71932e-09,0.230673,0.00094423,1.14482e-06,-1.49452e-08,0.231619,0.000946474,1.09998e-06,2.02591e-08,0.232566,0.000948735,1.16076e-06,-2.13879e-08,0.233516,0.000950993,1.0966e-06,2.05888e-08,0.234468,0.000953247,1.15837e-06,-1.62642e-08,0.235423,0.000955515,1.10957e-06,1.46658e-08,0.236379,0.000957779,1.15357e-06,-1.25966e-08,0.237338,0.000960048,1.11578e-06,5.91793e-09,0.238299,0.000962297,1.13353e-06,3.82602e-09,0.239263,0.000964576,1.14501e-06,-6.3208e-09,0.240229,0.000966847,1.12605e-06,6.55613e-09,0.241197,0.000969119,1.14572e-06,-5.00268e-09,0.242167,0.000971395,1.13071e-06,-1.44659e-09,0.243139,0.000973652,1.12637e-06,1.07891e-08,0.244114,0.000975937,1.15874e-06,-1.19073e-08,0.245091,0.000978219,1.12302e-06,7.03782e-09,0.246071,0.000980486,1.14413e-06,-1.34276e-09,0.247052,0.00098277,1.1401e-06,-1.66669e-09,0.248036,0.000985046,1.1351e-06,8.00935e-09,0.249022,0.00098734,1.15913e-06,-1.54694e-08,0.250011,0.000989612,1.11272e-06,2.4066e-08,0.251002,0.000991909,1.18492e-06,-2.11901e-08,0.251995,0.000994215,1.12135e-06,1.08973e-09,0.25299,0.000996461,1.12462e-06,1.68311e-08,0.253988,0.000998761,1.17511e-06,-8.8094e-09,0.254987,0.00100109,1.14868e-06,-1.13958e-08,0.25599,0.00100335,1.1145e-06,2.45902e-08,0.256994,0.00100565,1.18827e-06,-2.73603e-08,0.258001,0.00100795,1.10618e-06,2.52464e-08,0.25901,0.00101023,1.18192e-06,-1.40207e-08,0.260021,0.00101256,1.13986e-06,1.03387e-09,0.261035,0.00101484,1.14296e-06,9.8853e-09,0.262051,0.00101715,1.17262e-06,-1.07726e-08,0.263069,0.00101947,1.1403e-06,3.40272e-09,0.26409,0.00102176,1.15051e-06,-2.83827e-09,0.265113,0.00102405,1.142e-06,7.95039e-09,0.266138,0.00102636,1.16585e-06,8.39047e-10,
0.267166,0.00102869,1.16836e-06,-1.13066e-08,0.268196,0.00103099,1.13444e-06,1.4585e-08,0.269228,0.00103331,1.1782e-06,-1.72314e-08,0.270262,0.00103561,1.1265e-06,2.45382e-08,0.271299,0.00103794,1.20012e-06,-2.13166e-08,0.272338,0.00104028,1.13617e-06,1.12364e-09,0.273379,0.00104255,1.13954e-06,1.68221e-08,0.274423,0.00104488,1.19001e-06,-8.80736e-09,0.275469,0.00104723,1.16358e-06,-1.13948e-08,0.276518,0.00104953,1.1294e-06,2.45839e-08,0.277568,0.00105186,1.20315e-06,-2.73361e-08,0.278621,0.00105418,1.12114e-06,2.51559e-08,0.279677,0.0010565,1.19661e-06,-1.36832e-08,0.280734,0.00105885,1.15556e-06,-2.25706e-10,0.281794,0.00106116,1.15488e-06,1.45862e-08,0.282857,0.00106352,1.19864e-06,-2.83167e-08,0.283921,0.00106583,1.11369e-06,3.90759e-08,0.284988,0.00106817,1.23092e-06,-3.85801e-08,0.286058,0.00107052,1.11518e-06,2.58375e-08,0.287129,0.00107283,1.19269e-06,-5.16498e-09,0.288203,0.0010752,1.1772e-06,-5.17768e-09,0.28928,0.00107754,1.16167e-06,-3.92671e-09,0.290358,0.00107985,1.14988e-06,2.08846e-08,0.29144,0.00108221,1.21254e-06,-2.00072e-08,0.292523,0.00108458,1.15252e-06,-4.60659e-10,0.293609,0.00108688,1.15114e-06,2.18499e-08,0.294697,0.00108925,1.21669e-06,-2.73343e-08,0.295787,0.0010916,1.13468e-06,2.78826e-08,0.29688,0.00109395,1.21833e-06,-2.45915e-08,0.297975,0.00109632,1.14456e-06,1.08787e-08,0.299073,0.00109864,1.17719e-06,1.08788e-08,0.300172,0.00110102,1.20983e-06,-2.45915e-08,0.301275,0.00110337,1.13605e-06,2.78828e-08,0.302379,0.00110573,1.2197e-06,-2.73348e-08,0.303486,0.00110808,1.1377e-06,2.18518e-08,0.304595,0.00111042,1.20325e-06,-4.67556e-10,0.305707,0.00111283,1.20185e-06,-1.99816e-08,0.306821,0.00111517,1.14191e-06,2.07891e-08,0.307937,0.00111752,1.20427e-06,-3.57026e-09,0.309056,0.00111992,1.19356e-06,-6.50797e-09,0.310177,0.00112228,1.17404e-06,-2.00165e-10,0.3113,0.00112463,1.17344e-06,7.30874e-09,0.312426,0.001127,1.19536e-06,7.67424e-10,0.313554,0.00112939,1.19767e-06,-1.03784e-08,0.314685,0.00113176,1.16653e-06,1.09437e-08,0.315818,0.0
0113412,1.19936e-06,-3.59406e-09,0.316953,0.00113651,1.18858e-06,3.43251e-09,0.318091,0.0011389,1.19888e-06,-1.0136e-08,0.319231,0.00114127,1.16847e-06,7.30915e-09,0.320374,0.00114363,1.1904e-06,1.07018e-08,0.321518,0.00114604,1.2225e-06,-2.03137e-08,0.322666,0.00114842,1.16156e-06,1.09484e-08,0.323815,0.00115078,1.19441e-06,6.32224e-09,0.324967,0.00115319,1.21337e-06,-6.43509e-09,0.326122,0.00115559,1.19407e-06,-1.03842e-08,0.327278,0.00115795,1.16291e-06,1.81697e-08,0.328438,0.00116033,1.21742e-06,-2.6901e-09,0.329599,0.00116276,1.20935e-06,-7.40939e-09,0.330763,0.00116515,1.18713e-06,2.52533e-09,0.331929,0.00116754,1.1947e-06,-2.69191e-09,0.333098,0.00116992,1.18663e-06,8.24218e-09,0.334269,0.00117232,1.21135e-06,-4.74377e-10,0.335443,0.00117474,1.20993e-06,-6.34471e-09,0.336619,0.00117714,1.1909e-06,-3.94922e-09,0.337797,0.00117951,1.17905e-06,2.21417e-08,0.338978,0.00118193,1.24547e-06,-2.50128e-08,0.340161,0.00118435,1.17043e-06,1.8305e-08,0.341346,0.00118674,1.22535e-06,-1.84048e-08,0.342534,0.00118914,1.17013e-06,2.55121e-08,0.343725,0.00119156,1.24667e-06,-2.40389e-08,0.344917,0.00119398,1.17455e-06,1.10389e-08,0.346113,0.00119636,1.20767e-06,9.68574e-09,0.34731,0.0011988,1.23673e-06,-1.99797e-08,0.34851,0.00120122,1.17679e-06,1.06284e-08,0.349713,0.0012036,1.20867e-06,7.26868e-09,0.350917,0.00120604,1.23048e-06,-9.90072e-09,0.352125,0.00120847,1.20078e-06,2.53177e-09,0.353334,0.00121088,1.20837e-06,-2.26199e-10,0.354546,0.0012133,1.20769e-06,-1.62705e-09,0.355761,0.00121571,1.20281e-06,6.73435e-09,0.356978,0.00121813,1.22302e-06,4.49207e-09,0.358197,0.00122059,1.23649e-06,-2.47027e-08,0.359419,0.00122299,1.16238e-06,3.47142e-08,0.360643,0.00122542,1.26653e-06,-2.47472e-08,0.36187,0.00122788,1.19229e-06,4.66965e-09,0.363099,0.00123028,1.20629e-06,6.06872e-09,0.36433,0.00123271,1.2245e-06,8.57729e-10,0.365564,0.00123516,1.22707e-06,-9.49952e-09,0.366801,0.00123759,1.19858e-06,7.33792e-09,0.36804,0.00124001,1.22059e-06,9.95025e-09,0.369281,0.00124248,1.25044e
-06,-1.73366e-08,0.370525,0.00124493,1.19843e-06,-2.08464e-10,0.371771,0.00124732,1.1978e-06,1.81704e-08,0.373019,0.00124977,1.25232e-06,-1.28683e-08,0.37427,0.00125224,1.21371e-06,3.50042e-09,0.375524,0.00125468,1.22421e-06,-1.1335e-09,0.37678,0.00125712,1.22081e-06,1.03345e-09,0.378038,0.00125957,1.22391e-06,-3.00023e-09,0.379299,0.00126201,1.21491e-06,1.09676e-08,0.380562,0.00126447,1.24781e-06,-1.10676e-08,0.381828,0.00126693,1.21461e-06,3.50042e-09,0.383096,0.00126937,1.22511e-06,-2.93403e-09,0.384366,0.00127181,1.21631e-06,8.23574e-09,0.385639,0.00127427,1.24102e-06,-2.06607e-10,0.386915,0.00127675,1.2404e-06,-7.40935e-09,0.388193,0.00127921,1.21817e-06,4.1761e-11,0.389473,0.00128165,1.21829e-06,7.24223e-09,0.390756,0.0012841,1.24002e-06,7.91564e-10,0.392042,0.00128659,1.2424e-06,-1.04086e-08,0.393329,0.00128904,1.21117e-06,1.10405e-08,0.39462,0.0012915,1.24429e-06,-3.951e-09,0.395912,0.00129397,1.23244e-06,4.7634e-09,0.397208,0.00129645,1.24673e-06,-1.51025e-08,0.398505,0.0012989,1.20142e-06,2.58443e-08,0.399805,0.00130138,1.27895e-06,-2.86702e-08,0.401108,0.00130385,1.19294e-06,2.92318e-08,0.402413,0.00130632,1.28064e-06,-2.86524e-08,0.403721,0.0013088,1.19468e-06,2.57731e-08,0.405031,0.00131127,1.272e-06,-1.48355e-08,0.406343,0.00131377,1.2275e-06,3.76652e-09,0.407658,0.00131623,1.23879e-06,-2.30784e-10,0.408976,0.00131871,1.2381e-06,-2.84331e-09,0.410296,0.00132118,1.22957e-06,1.16041e-08,0.411618,0.00132367,1.26438e-06,-1.37708e-08,0.412943,0.00132616,1.22307e-06,1.36768e-08,0.41427,0.00132865,1.2641e-06,-1.1134e-08,0.4156,0.00133114,1.2307e-06,1.05714e-09,0.416933,0.00133361,1.23387e-06,6.90538e-09,0.418267,0.00133609,1.25459e-06,1.12372e-09,0.419605,0.00133861,1.25796e-06,-1.14002e-08,0.420945,0.00134109,1.22376e-06,1.46747e-08,0.422287,0.00134358,1.26778e-06,-1.7496e-08,0.423632,0.00134606,1.21529e-06,2.5507e-08,0.424979,0.00134857,1.29182e-06,-2.49272e-08,0.426329,0.00135108,1.21703e-06,1.45972e-08,0.427681,0.00135356,1.26083e-06,-3.65935e-09,0.429036
,0.00135607,1.24985e-06,4.00178e-11,0.430393,0.00135857,1.24997e-06,3.49917e-09,0.431753,0.00136108,1.26047e-06,-1.40366e-08,0.433116,0.00136356,1.21836e-06,2.28448e-08,0.43448,0.00136606,1.28689e-06,-1.77378e-08,0.435848,0.00136858,1.23368e-06,1.83043e-08,0.437218,0.0013711,1.28859e-06,-2.56769e-08,0.43859,0.0013736,1.21156e-06,2.47987e-08,0.439965,0.0013761,1.28595e-06,-1.39133e-08,0.441342,0.00137863,1.24421e-06,1.05202e-09,0.442722,0.00138112,1.24737e-06,9.70507e-09,0.444104,0.00138365,1.27649e-06,-1.00698e-08,0.445489,0.00138617,1.24628e-06,7.72123e-10,0.446877,0.00138867,1.24859e-06,6.98132e-09,0.448267,0.00139118,1.26954e-06,1.10477e-09,0.449659,0.00139373,1.27285e-06,-1.14003e-08,0.451054,0.00139624,1.23865e-06,1.4694e-08,0.452452,0.00139876,1.28273e-06,-1.75734e-08,0.453852,0.00140127,1.23001e-06,2.5797e-08,0.455254,0.00140381,1.3074e-06,-2.60097e-08,0.456659,0.00140635,1.22937e-06,1.86371e-08,0.458067,0.00140886,1.28529e-06,-1.8736e-08,0.459477,0.00141137,1.22908e-06,2.65048e-08,0.46089,0.00141391,1.30859e-06,-2.76784e-08,0.462305,0.00141645,1.22556e-06,2.46043e-08,0.463722,0.00141897,1.29937e-06,-1.11341e-08,0.465143,0.00142154,1.26597e-06,-9.87033e-09,0.466565,0.00142404,1.23636e-06,2.08131e-08,0.467991,0.00142657,1.2988e-06,-1.37773e-08,0.469419,0.00142913,1.25746e-06,4.49378e-09,0.470849,0.00143166,1.27094e-06,-4.19781e-09,0.472282,0.00143419,1.25835e-06,1.22975e-08,0.473717,0.00143674,1.29524e-06,-1.51902e-08,0.475155,0.00143929,1.24967e-06,1.86608e-08,0.476596,0.00144184,1.30566e-06,-2.96506e-08,0.478039,0.00144436,1.2167e-06,4.03368e-08,0.479485,0.00144692,1.33771e-06,-4.22896e-08,0.480933,0.00144947,1.21085e-06,3.94148e-08,0.482384,0.00145201,1.32909e-06,-2.59626e-08,0.483837,0.00145459,1.2512e-06,4.83124e-09,0.485293,0.0014571,1.2657e-06,6.63757e-09,0.486751,0.00145966,1.28561e-06,-1.57911e-09,0.488212,0.00146222,1.28087e-06,-3.21468e-10,0.489676,0.00146478,1.27991e-06,2.86517e-09,0.491142,0.00146735,1.2885e-06,-1.11392e-08,0.49261,0.00146989,1.25
508e-06,1.18893e-08,0.494081,0.00147244,1.29075e-06,-6.61574e-09,0.495555,0.001475,1.27091e-06,1.45736e-08,0.497031,0.00147759,1.31463e-06,-2.18759e-08,0.49851,0.00148015,1.249e-06,1.33252e-08,0.499992,0.00148269,1.28897e-06,-1.62277e-09,0.501476,0.00148526,1.28411e-06,-6.83421e-09,0.502962,0.00148781,1.2636e-06,2.89596e-08,0.504451,0.00149042,1.35048e-06,-4.93997e-08,0.505943,0.00149298,1.20228e-06,4.94299e-08,0.507437,0.00149553,1.35057e-06,-2.91107e-08,0.508934,0.00149814,1.26324e-06,7.40848e-09,0.510434,0.00150069,1.28547e-06,-5.23187e-10,0.511936,0.00150326,1.2839e-06,-5.31585e-09,0.51344,0.00150581,1.26795e-06,2.17866e-08,0.514947,0.00150841,1.33331e-06,-2.22257e-08,0.516457,0.00151101,1.26663e-06,7.51178e-09,0.517969,0.00151357,1.28917e-06,-7.82128e-09,0.519484,0.00151613,1.2657e-06,2.37733e-08,0.521002,0.00151873,1.33702e-06,-2.76674e-08,0.522522,0.00152132,1.25402e-06,2.72917e-08,0.524044,0.00152391,1.3359e-06,-2.18949e-08,0.525569,0.00152652,1.27021e-06,6.83372e-10,0.527097,0.00152906,1.27226e-06,1.91613e-08,0.528628,0.00153166,1.32974e-06,-1.77241e-08,0.53016,0.00153427,1.27657e-06,-7.86963e-09,0.531696,0.0015368,1.25296e-06,4.92027e-08,0.533234,0.00153945,1.40057e-06,-6.9732e-08,0.534775,0.00154204,1.19138e-06,5.09114e-08,0.536318,0.00154458,1.34411e-06,-1.4704e-08,0.537864,0.00154722,1.3e-06,7.9048e-09,0.539413,0.00154984,1.32371e-06,-1.69152e-08,0.540964,0.00155244,1.27297e-06,1.51355e-10,0.542517,0.00155499,1.27342e-06,1.63099e-08,0.544074,0.00155758,1.32235e-06,-5.78647e-09,0.545633,0.00156021,1.30499e-06,6.83599e-09,0.547194,0.00156284,1.3255e-06,-2.15575e-08,0.548758,0.00156543,1.26083e-06,1.97892e-08,0.550325,0.00156801,1.32019e-06,2.00525e-09,0.551894,0.00157065,1.32621e-06,-2.78103e-08,0.553466,0.00157322,1.24278e-06,4.96314e-08,0.555041,0.00157586,1.39167e-06,-5.1506e-08,0.556618,0.00157849,1.23716e-06,3.71835e-08,0.558198,0.00158107,1.34871e-06,-3.76233e-08,0.55978,0.00158366,1.23584e-06,5.37052e-08,0.561365,0.00158629,1.39695e-06,-5.79884e-08
,0.562953,0.00158891,1.22299e-06,5.90392e-08,0.564543,0.00159153,1.4001e-06,-5.89592e-08,0.566136,0.00159416,1.22323e-06,5.7588e-08,0.567731,0.00159678,1.39599e-06,-5.21835e-08,0.569329,0.00159941,1.23944e-06,3.19369e-08,0.57093,0.00160199,1.33525e-06,-1.59594e-08,0.572533,0.00160461,1.28737e-06,3.19006e-08,0.574139,0.00160728,1.38307e-06,-5.20383e-08,0.575748,0.00160989,1.22696e-06,5.70431e-08,0.577359,0.00161251,1.39809e-06,-5.69247e-08,0.578973,0.00161514,1.22731e-06,5.14463e-08,0.580589,0.00161775,1.38165e-06,-2.9651e-08,0.582208,0.00162042,1.2927e-06,7.55339e-09,0.58383,0.00162303,1.31536e-06,-5.62636e-10,0.585455,0.00162566,1.31367e-06,-5.30281e-09,0.587081,0.00162827,1.29776e-06,2.17738e-08,0.588711,0.00163093,1.36309e-06,-2.21875e-08,0.590343,0.00163359,1.29652e-06,7.37164e-09,0.591978,0.00163621,1.31864e-06,-7.29907e-09,0.593616,0.00163882,1.29674e-06,2.18247e-08,0.595256,0.00164148,1.36221e-06,-2.03952e-08,0.596899,0.00164414,1.30103e-06,1.51241e-10,0.598544,0.00164675,1.30148e-06,1.97902e-08,0.600192,0.00164941,1.36085e-06,-1.97074e-08,0.601843,0.00165207,1.30173e-06,-5.65175e-10,0.603496,0.00165467,1.30004e-06,2.1968e-08,0.605152,0.00165734,1.36594e-06,-2.77024e-08,0.606811,0.00165999,1.28283e-06,2.92369e-08,0.608472,0.00166264,1.37054e-06,-2.96407e-08,0.610136,0.00166529,1.28162e-06,2.97215e-08,0.611803,0.00166795,1.37079e-06,-2.96408e-08,0.613472,0.0016706,1.28186e-06,2.92371e-08,0.615144,0.00167325,1.36957e-06,-2.77031e-08,0.616819,0.00167591,1.28647e-06,2.19708e-08,0.618496,0.00167855,1.35238e-06,-5.75407e-10,0.620176,0.00168125,1.35065e-06,-1.9669e-08,0.621858,0.00168389,1.29164e-06,1.96468e-08,0.623544,0.00168653,1.35058e-06,6.86403e-10,0.625232,0.00168924,1.35264e-06,-2.23924e-08,0.626922,0.00169187,1.28547e-06,2.92788e-08,0.628615,0.00169453,1.3733e-06,-3.51181e-08,0.630311,0.00169717,1.26795e-06,5.15889e-08,0.63201,0.00169987,1.42272e-06,-5.2028e-08,0.633711,0.00170255,1.26663e-06,3.73139e-08,0.635415,0.0017052,1.37857e-06,-3.76227e-08,0.637121,
0.00170784,1.2657e-06,5.35722e-08,0.63883,0.00171054,1.42642e-06,-5.74567e-08,0.640542,0.00171322,1.25405e-06,5.70456e-08,0.642257,0.0017159,1.42519e-06,-5.15163e-08,0.643974,0.00171859,1.27064e-06,2.98103e-08,0.645694,0.00172122,1.36007e-06,-8.12016e-09,0.647417,0.00172392,1.33571e-06,2.67039e-09,0.649142,0.0017266,1.34372e-06,-2.56152e-09,0.65087,0.00172928,1.33604e-06,7.57571e-09,0.6526,0.00173197,1.35876e-06,-2.77413e-08,0.654334,0.00173461,1.27554e-06,4.3785e-08,0.65607,0.00173729,1.40689e-06,-2.81896e-08,0.657808,0.00174002,1.32233e-06,9.36893e-09,0.65955,0.00174269,1.35043e-06,-9.28617e-09,0.661294,0.00174536,1.32257e-06,2.77757e-08,0.66304,0.00174809,1.4059e-06,-4.2212e-08,0.66479,0.00175078,1.27926e-06,2.1863e-08,0.666542,0.0017534,1.34485e-06,1.43648e-08,0.668297,0.00175613,1.38795e-06,-1.97177e-08,0.670054,0.00175885,1.3288e-06,4.90115e-09,0.671814,0.00176152,1.3435e-06,1.13232e-10,0.673577,0.00176421,1.34384e-06,-5.3542e-09,0.675343,0.00176688,1.32778e-06,2.13035e-08,0.677111,0.0017696,1.39169e-06,-2.02553e-08,0.678882,0.00177232,1.33092e-06,1.13005e-10,0.680656,0.00177499,1.33126e-06,1.98031e-08,0.682432,0.00177771,1.39067e-06,-1.97211e-08,0.684211,0.00178043,1.33151e-06,-5.2349e-10,0.685993,0.00178309,1.32994e-06,2.18151e-08,0.687777,0.00178582,1.39538e-06,-2.71325e-08,0.689564,0.00178853,1.31398e-06,2.71101e-08,0.691354,0.00179124,1.39531e-06,-2.17035e-08,0.693147,0.00179396,1.3302e-06,9.92865e-11,0.694942,0.00179662,1.3305e-06,2.13063e-08,0.69674,0.00179935,1.39442e-06,-2.57198e-08,0.698541,0.00180206,1.31726e-06,2.19682e-08,0.700344,0.00180476,1.38317e-06,-2.54852e-09,0.70215,0.00180752,1.37552e-06,-1.17741e-08,0.703959,0.00181023,1.3402e-06,-9.95999e-09,0.705771,0.00181288,1.31032e-06,5.16141e-08,0.707585,0.00181566,1.46516e-06,-7.72869e-08,0.709402,0.00181836,1.2333e-06,7.87197e-08,0.711222,0.00182106,1.46946e-06,-5.87781e-08,0.713044,0.00182382,1.29312e-06,3.71834e-08,0.714869,0.00182652,1.40467e-06,-3.03511e-08,0.716697,0.00182924,1.31362e-06,2.
46161e-08,0.718528,0.00183194,1.38747e-06,-8.5087e-09,0.720361,0.00183469,1.36194e-06,9.41892e-09,0.722197,0.00183744,1.3902e-06,-2.91671e-08,0.724036,0.00184014,1.3027e-06,4.76448e-08,0.725878,0.00184288,1.44563e-06,-4.22028e-08,0.727722,0.00184565,1.31902e-06,1.95682e-09,0.729569,0.00184829,1.3249e-06,3.43754e-08,0.731419,0.00185104,1.42802e-06,-2.0249e-08,0.733271,0.00185384,1.36727e-06,-1.29838e-08,0.735126,0.00185654,1.32832e-06,1.25794e-08,0.736984,0.00185923,1.36606e-06,2.22711e-08,0.738845,0.00186203,1.43287e-06,-4.20594e-08,0.740708,0.00186477,1.3067e-06,2.67571e-08,0.742574,0.00186746,1.38697e-06,-5.36424e-09,0.744443,0.00187022,1.37087e-06,-5.30023e-09,0.746315,0.00187295,1.35497e-06,2.65653e-08,0.748189,0.00187574,1.43467e-06,-4.13564e-08,0.750066,0.00187848,1.3106e-06,1.9651e-08,0.751946,0.00188116,1.36955e-06,2.23572e-08,0.753828,0.00188397,1.43663e-06,-4.9475e-08,0.755714,0.00188669,1.2882e-06,5.63335e-08,0.757602,0.00188944,1.4572e-06,-5.66499e-08,0.759493,0.00189218,1.28725e-06,5.10567e-08,0.761386,0.00189491,1.44042e-06,-2.83677e-08,0.763283,0.00189771,1.35532e-06,2.80962e-09,0.765182,0.00190042,1.36375e-06,1.71293e-08,0.767083,0.0019032,1.41513e-06,-1.17221e-08,0.768988,0.001906,1.37997e-06,-2.98453e-08,0.770895,0.00190867,1.29043e-06,7.14987e-08,0.772805,0.00191146,1.50493e-06,-7.73354e-08,0.774718,0.00191424,1.27292e-06,5.90292e-08,0.776634,0.00191697,1.45001e-06,-3.9572e-08,0.778552,0.00191975,1.33129e-06,3.9654e-08,0.780473,0.00192253,1.45026e-06,-5.94395e-08,0.782397,0.00192525,1.27194e-06,7.88945e-08,0.784324,0.00192803,1.50862e-06,-7.73249e-08,0.786253,0.00193082,1.27665e-06,5.15913e-08,0.788185,0.00193352,1.43142e-06,-9.83099e-09,0.79012,0.00193636,1.40193e-06,-1.22672e-08,0.792058,0.00193912,1.36513e-06,-7.05275e-10,0.793999,0.00194185,1.36301e-06,1.50883e-08,0.795942,0.00194462,1.40828e-06,-4.33147e-11,0.797888,0.00194744,1.40815e-06,-1.49151e-08,0.799837,0.00195021,1.3634e-06,9.93244e-11,0.801788,0.00195294,1.3637e-06,1.45179e-08,0.8037
43,0.00195571,1.40725e-06,1.43363e-09,0.8057,0.00195853,1.41155e-06,-2.02525e-08,0.80766,0.00196129,1.35079e-06,1.99718e-08,0.809622,0.00196405,1.41071e-06,-3.01649e-11,0.811588,0.00196687,1.41062e-06,-1.9851e-08,0.813556,0.00196964,1.35107e-06,1.98296e-08,0.815527,0.0019724,1.41056e-06,1.37485e-10,0.817501,0.00197522,1.41097e-06,-2.03796e-08,0.819477,0.00197798,1.34983e-06,2.17763e-08,0.821457,0.00198074,1.41516e-06,-7.12085e-09,0.823439,0.00198355,1.3938e-06,6.70707e-09,0.825424,0.00198636,1.41392e-06,-1.97074e-08,0.827412,0.00198913,1.35479e-06,1.25179e-08,0.829402,0.00199188,1.39235e-06,2.92405e-08,0.831396,0.00199475,1.48007e-06,-6.98755e-08,0.833392,0.0019975,1.27044e-06,7.14477e-08,0.835391,0.00200026,1.48479e-06,-3.71014e-08,0.837392,0.00200311,1.37348e-06,1.73533e-08,0.839397,0.00200591,1.42554e-06,-3.23118e-08,0.841404,0.00200867,1.32861e-06,5.2289e-08,0.843414,0.00201148,1.48547e-06,-5.76348e-08,0.845427,0.00201428,1.31257e-06,5.9041e-08,0.847443,0.00201708,1.48969e-06,-5.93197e-08,0.849461,0.00201988,1.31173e-06,5.90289e-08,0.851482,0.00202268,1.48882e-06,-5.75864e-08,0.853507,0.00202549,1.31606e-06,5.21075e-08,0.855533,0.00202828,1.47238e-06,-3.16344e-08,0.857563,0.00203113,1.37748e-06,1.48257e-08,0.859596,0.00203393,1.42196e-06,-2.76684e-08,0.861631,0.00203669,1.33895e-06,3.62433e-08,0.863669,0.00203947,1.44768e-06,1.90463e-09,0.86571,0.00204237,1.45339e-06,-4.38617e-08,0.867754,0.00204515,1.32181e-06,5.43328e-08,0.8698,0.00204796,1.48481e-06,-5.42603e-08,0.87185,0.00205076,1.32203e-06,4.34989e-08,0.873902,0.00205354,1.45252e-06,-5.26029e-10,0.875957,0.00205644,1.45095e-06,-4.13949e-08,0.878015,0.00205922,1.32676e-06,4.68962e-08,0.880075,0.00206201,1.46745e-06,-2.69807e-08,0.882139,0.00206487,1.38651e-06,1.42181e-09,0.884205,0.00206764,1.39077e-06,2.12935e-08,0.886274,0.00207049,1.45465e-06,-2.69912e-08,0.888346,0.00207332,1.37368e-06,2.70664e-08,0.890421,0.00207615,1.45488e-06,-2.16698e-08,0.892498,0.00207899,1.38987e-06,8.14756e-12,0.894579,0.0020817
7,1.38989e-06,2.16371e-08,0.896662,0.00208462,1.45481e-06,-2.6952e-08,0.898748,0.00208744,1.37395e-06,2.65663e-08,0.900837,0.00209027,1.45365e-06,-1.97084e-08,0.902928,0.00209312,1.39452e-06,-7.33731e-09,0.905023,0.00209589,1.37251e-06,4.90578e-08,0.90712,0.00209878,1.51968e-06,-6.96845e-08,0.90922,0.00210161,1.31063e-06,5.08664e-08,0.911323,0.00210438,1.46323e-06,-1.45717e-08,0.913429,0.00210727,1.41952e-06,7.42038e-09,0.915538,0.00211013,1.44178e-06,-1.51097e-08,0.917649,0.00211297,1.39645e-06,-6.58618e-09,0.919764,0.00211574,1.37669e-06,4.14545e-08,0.921881,0.00211862,1.50105e-06,-4.00222e-08,0.924001,0.0021215,1.38099e-06,-5.7518e-10,0.926124,0.00212426,1.37926e-06,4.23229e-08,0.92825,0.00212714,1.50623e-06,-4.9507e-08,0.930378,0.00213001,1.35771e-06,3.64958e-08,0.93251,0.00213283,1.4672e-06,-3.68713e-08,0.934644,0.00213566,1.35658e-06,5.13848e-08,0.936781,0.00213852,1.51074e-06,-4.94585e-08,0.938921,0.0021414,1.36236e-06,2.72399e-08,0.941064,0.0021442,1.44408e-06,1.0372e-10,0.943209,0.00214709,1.44439e-06,-2.76547e-08,0.945358,0.0021499,1.36143e-06,5.09106e-08,0.947509,0.00215277,1.51416e-06,-5.67784e-08,0.949663,0.00215563,1.34382e-06,5.69935e-08,0.95182,0.00215849,1.5148e-06,-5.19861e-08,0.95398,0.00216136,1.35885e-06,3.17417e-08,0.956143,0.00216418,1.45407e-06,-1.53758e-08,0.958309,0.00216704,1.40794e-06,2.97615e-08,0.960477,0.00216994,1.49723e-06,-4.40657e-08,0.962649,0.00217281,1.36503e-06,2.72919e-08,0.964823,0.00217562,1.44691e-06,-5.49729e-09,0.967,0.0021785,1.43041e-06,-5.30273e-09,0.96918,0.00218134,1.41451e-06,2.67084e-08,0.971363,0.00218425,1.49463e-06,-4.19265e-08,0.973548,0.00218711,1.36885e-06,2.17881e-08,0.975737,0.00218992,1.43422e-06,1.43789e-08,0.977928,0.00219283,1.47735e-06,-1.96989e-08,0.980122,0.00219572,1.41826e-06,4.81221e-09,0.98232,0.00219857,1.43269e-06,4.50048e-10,0.98452,0.00220144,1.43404e-06,-6.61237e-09,0.986722,0.00220429,1.41421e-06,2.59993e-08,0.988928,0.0022072,1.4922e-06,-3.77803e-08,0.991137,0.00221007,1.37886e-06,5.9127e-
09,0.993348,0.00221284,1.3966e-06,1.33339e-07,0.995563,0.00221604,1.79662e-06,-5.98872e-07,0.99778,0.00222015,0.,0.};
+
+ // Convert one float RGB(A) pixel to CIE L*a*b*.
+ // blueIdx selects the channel order (0 => BGR layout, otherwise RGB);
+ // srgb applies the inverse sRGB gamma (via the precomputed spline table)
+ // before the linear RGB -> XYZ transform.
+ // NOTE(review): the XYZ coefficients appear to be the standard matrix
+ // pre-divided by the reference white point — confirm against upstream OpenCV.
+ template <bool srgb, int blueIdx, typename T, typename D>
+ __device__ __forceinline__ void RGB2LabConvert_32F(const T& src, D& dst)
+ {
+     const float cbrtExp = 1.0f / 3.0f;     // exponent of the cube-root branch
+     const float labBias = 16.0f / 116.0f;  // offset of the linear low-value branch
+
+     float blue, green, red;
+     green = src.y;
+     if (blueIdx == 0)
+     {
+         blue = src.x;
+         red  = src.z;
+     }
+     else
+     {
+         blue = src.z;
+         red  = src.x;
+     }
+
+     if (srgb)
+     {
+         // Linearize gamma-encoded values through the spline lookup table.
+         blue  = splineInterpolate(blue  * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+         green = splineInterpolate(green * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+         red   = splineInterpolate(red   * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+     }
+
+     // Linear RGB -> XYZ (expressions kept identical to preserve float results).
+     float X = blue * 0.189828f + green * 0.376219f + red * 0.433953f;
+     float Y = blue * 0.072169f + green * 0.715160f + red * 0.212671f;
+     float Z = blue * 0.872766f + green * 0.109477f + red * 0.017758f;
+
+     // Piecewise f(t): cube root above the 0.008856 threshold, linear below it.
+     float FX = X > 0.008856f ? ::powf(X, cbrtExp) : (7.787f * X + labBias);
+     float FY = Y > 0.008856f ? ::powf(Y, cbrtExp) : (7.787f * Y + labBias);
+     float FZ = Z > 0.008856f ? ::powf(Z, cbrtExp) : (7.787f * Z + labBias);
+
+     dst.x = Y > 0.008856f ? (116.f * FY - 16.f) : (903.3f * Y);  // L
+     dst.y = 500.f * (FX - FY);                                   // a
+     dst.z = 200.f * (FY - FZ);                                   // b
+ }
+
+ // Primary template; specialized below on the channel depth (uchar / float).
+ template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct RGB2Lab;
+
+ // 8-bit specialization: adapts RGB2LabConvert_8U into a unary functor usable
+ // by the GPU per-pixel transform framework.
+ template <int scn, int dcn, bool srgb, int blueIdx>
+ struct RGB2Lab<uchar, scn, dcn, srgb, blueIdx>
+     : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+ {
+     __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+     {
+         // Delegate the actual conversion; this struct only provides the
+         // functor interface expected by the transform machinery.
+         typename TypeVec<uchar, dcn>::vec_type result;
+         RGB2LabConvert_8U<srgb, blueIdx>(src, result);
+         return result;
+     }
+
+     // Empty default/copy constructors keep the functor constructible on
+     // both host and device.
+     __host__ __device__ __forceinline__ RGB2Lab() {}
+     __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
+ };
+
+ // 32-bit float specialization: adapts RGB2LabConvert_32F into a unary
+ // functor usable by the GPU per-pixel transform framework.
+ template <int scn, int dcn, bool srgb, int blueIdx>
+ struct RGB2Lab<float, scn, dcn, srgb, blueIdx>
+     : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+     __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+     {
+         // Delegate the actual conversion; this struct only provides the
+         // functor interface expected by the transform machinery.
+         typename TypeVec<float, dcn>::vec_type result;
+         RGB2LabConvert_32F<srgb, blueIdx>(src, result);
+         return result;
+     }
+
+     // Empty default/copy constructors keep the functor constructible on
+     // both host and device.
+     __host__ __device__ __forceinline__ RGB2Lab() {}
+     __host__ __device__ __forceinline__ RGB2Lab(const RGB2Lab&) {}
+ };
+ }
+
+// Generates a <name>_traits helper struct whose create_functor() returns the
+// RGB2Lab functor instantiated for the given source/destination channel
+// counts (scn/dcn), sRGB flag and blue-channel index. Comments cannot go
+// inside the macro body because of the backslash line continuations.
+#define OPENCV_GPU_IMPLEMENT_RGB2Lab_TRAITS(name, scn, dcn, srgb, blueIdx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2Lab<T, scn, dcn, srgb, blueIdx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ namespace color_detail
+ {
+ __constant__ float c_sRGBInvGammaTab[] = {0,0.0126255,0.,-8.33961e-06,0.0126172,0.0126005,-2.50188e-05,4.1698e-05,0.0252344,0.0126756,0.000100075,-0.000158451,0.0378516,0.0124004,-0.000375277,-0.000207393,0.0496693,0.0110276,-0.000997456,0.00016837,0.0598678,0.00953783,-0.000492346,2.07235e-05,0.068934,0.00861531,-0.000430176,3.62876e-05,0.0771554,0.00786382,-0.000321313,1.87625e-05,0.0847167,0.00727748,-0.000265025,1.53594e-05,0.0917445,0.00679351,-0.000218947,1.10545e-05,0.0983301,0.00638877,-0.000185784,8.66984e-06,0.104542,0.00604322,-0.000159774,6.82996e-06,0.110432,0.00574416,-0.000139284,5.51008e-06,0.116042,0.00548212,-0.000122754,4.52322e-06,0.121406,0.00525018,-0.000109184,3.75557e-06,0.126551,0.00504308,-9.79177e-05,3.17134e-06,0.131499,0.00485676,-8.84037e-05,2.68469e-06,0.13627,0.004688,-8.03496e-05,2.31725e-06,0.14088,0.00453426,-7.33978e-05,2.00868e-06,0.145343,0.00439349,-6.73718e-05,1.74775e-06,0.149671,0.00426399,-6.21286e-05,1.53547e-06,0.153875,0.00414434,-5.75222e-05,1.364e-06,0.157963,0.00403338,-5.34301e-05,1.20416e-06,0.161944,0.00393014,-4.98177e-05,1.09114e-06,0.165825,0.00383377,-4.65443e-05,9.57987e-07,0.169613,0.00374356,-4.36703e-05,8.88359e-07,0.173314,0.00365888,-4.10052e-05,7.7849e-07,0.176933,0.00357921,-3.86697e-05,7.36254e-07,0.180474,0.00350408,-3.6461e-05,6.42534e-07,0.183942,0.00343308,-3.45334e-05,6.12614e-07,0.187342,0.00336586,-3.26955e-05,5.42894e-07,0.190675,0.00330209,-3.10669e-05,5.08967e-07,0.193947,0.00324149,-2.954e-05,4.75977e-07,0.197159,0.00318383,-2.8112e-05,4.18343e-07,0.200315,0.00312887,-2.6857e-05,4.13651e-07,0.203418,0.00307639,-2.5616e-05,3.70847e-07,0.206469,0.00302627,-2.45035e-05,3.3813e-07,0.209471,0.00297828,-2.34891e-05,3.32999e-07,0.212426,0.0029323,-2.24901e-05,2.96826e-07,0.215336,0.00288821,-2.15996e-05,2.82736e-07,0.218203,0.00284586,-2.07514e-05,2.70961e-07,0.221029,0.00280517,-1.99385e-05,2.42744e-07,0.223814,0.00276602,-1.92103e-05,2.33277e-07,0.226561,0.0027283,-1.85105e-05,2.2486e-07,0.2292
71,0.00269195,-1.78359e-05,2.08383e-07,0.231945,0.00265691,-1.72108e-05,1.93305e-07,0.234585,0.00262307,-1.66308e-05,1.80687e-07,0.237192,0.00259035,-1.60888e-05,1.86632e-07,0.239766,0.00255873,-1.55289e-05,1.60569e-07,0.24231,0.00252815,-1.50472e-05,1.54566e-07,0.244823,0.00249852,-1.45835e-05,1.59939e-07,0.247307,0.00246983,-1.41037e-05,1.29549e-07,0.249763,0.00244202,-1.3715e-05,1.41429e-07,0.252191,0.00241501,-1.32907e-05,1.39198e-07,0.254593,0.00238885,-1.28731e-05,1.06444e-07,0.256969,0.00236342,-1.25538e-05,1.2048e-07,0.25932,0.00233867,-1.21924e-05,1.26892e-07,0.261647,0.00231467,-1.18117e-05,8.72084e-08,0.26395,0.00229131,-1.15501e-05,1.20323e-07,0.26623,0.00226857,-1.11891e-05,8.71514e-08,0.268487,0.00224645,-1.09276e-05,9.73165e-08,0.270723,0.00222489,-1.06357e-05,8.98259e-08,0.272937,0.00220389,-1.03662e-05,7.98218e-08,0.275131,0.00218339,-1.01267e-05,9.75254e-08,0.277304,0.00216343,-9.83416e-06,6.65195e-08,0.279458,0.00214396,-9.63461e-06,8.34313e-08,0.281592,0.00212494,-9.38431e-06,7.65919e-08,0.283708,0.00210641,-9.15454e-06,5.7236e-08,0.285805,0.00208827,-8.98283e-06,8.18939e-08,0.287885,0.00207055,-8.73715e-06,6.2224e-08,0.289946,0.00205326,-8.55047e-06,5.66388e-08,0.291991,0.00203633,-8.38056e-06,6.88491e-08,0.294019,0.00201978,-8.17401e-06,5.53955e-08,0.296031,0.00200359,-8.00782e-06,6.71971e-08,0.298027,0.00198778,-7.80623e-06,3.34439e-08,0.300007,0.00197227,-7.7059e-06,6.7248e-08,0.301971,0.00195706,-7.50416e-06,5.51915e-08,0.303921,0.00194221,-7.33858e-06,3.98124e-08,0.305856,0.00192766,-7.21915e-06,5.37795e-08,0.307776,0.00191338,-7.05781e-06,4.30919e-08,0.309683,0.00189939,-6.92853e-06,4.20744e-08,0.311575,0.00188566,-6.80231e-06,5.68321e-08,0.313454,0.00187223,-6.63181e-06,2.86195e-08,0.31532,0.00185905,-6.54595e-06,3.73075e-08,0.317172,0.00184607,-6.43403e-06,6.05684e-08,0.319012,0.00183338,-6.25233e-06,1.84426e-08,0.320839,0.00182094,-6.197e-06,4.44757e-08,0.322654,0.00180867,-6.06357e-06,4.20729e-08,0.324456,0.00179667,-5.93735e-06,2.5651
1e-08,0.326247,0.00178488,-5.8604e-06,3.41368e-08,0.328026,0.00177326,-5.75799e-06,4.64177e-08,0.329794,0.00176188,-5.61874e-06,1.86107e-08,0.33155,0.0017507,-5.5629e-06,2.81511e-08,0.333295,0.00173966,-5.47845e-06,4.75987e-08,0.335029,0.00172884,-5.33565e-06,1.98726e-08,0.336753,0.00171823,-5.27604e-06,2.19226e-08,0.338466,0.00170775,-5.21027e-06,4.14483e-08,0.340169,0.00169745,-5.08592e-06,2.09017e-08,0.341861,0.00168734,-5.02322e-06,2.39561e-08,0.343543,0.00167737,-4.95135e-06,3.22852e-08,0.345216,0.00166756,-4.85449e-06,2.57173e-08,0.346878,0.00165793,-4.77734e-06,1.38569e-08,0.348532,0.00164841,-4.73577e-06,3.80634e-08,0.350175,0.00163906,-4.62158e-06,1.27043e-08,0.35181,0.00162985,-4.58347e-06,3.03279e-08,0.353435,0.00162078,-4.49249e-06,1.49961e-08,0.355051,0.00161184,-4.4475e-06,2.88977e-08,0.356659,0.00160303,-4.3608e-06,1.84241e-08,0.358257,0.00159436,-4.30553e-06,1.6616e-08,0.359848,0.0015858,-4.25568e-06,3.43218e-08,0.361429,0.00157739,-4.15272e-06,-4.89172e-09,0.363002,0.00156907,-4.16739e-06,4.48498e-08,0.364567,0.00156087,-4.03284e-06,4.30676e-09,0.366124,0.00155282,-4.01992e-06,2.73303e-08,0.367673,0.00154486,-3.93793e-06,5.58036e-09,0.369214,0.001537,-3.92119e-06,3.97554e-08,0.370747,0.00152928,-3.80193e-06,-1.55904e-08,0.372272,0.00152163,-3.8487e-06,5.24081e-08,0.37379,0.00151409,-3.69147e-06,-1.52272e-08,0.375301,0.00150666,-3.73715e-06,3.83028e-08,0.376804,0.0014993,-3.62225e-06,1.10278e-08,0.378299,0.00149209,-3.58916e-06,6.99326e-09,0.379788,0.00148493,-3.56818e-06,2.06038e-08,0.381269,0.00147786,-3.50637e-06,2.98009e-08,0.382744,0.00147093,-3.41697e-06,-2.05978e-08,0.384211,0.00146404,-3.47876e-06,5.25899e-08,0.385672,0.00145724,-3.32099e-06,-1.09471e-08,0.387126,0.00145056,-3.35383e-06,2.10009e-08,0.388573,0.00144392,-3.29083e-06,1.63501e-08,0.390014,0.00143739,-3.24178e-06,3.00641e-09,0.391448,0.00143091,-3.23276e-06,3.12282e-08,0.392875,0.00142454,-3.13908e-06,-8.70932e-09,0.394297,0.00141824,-3.16521e-06,3.34114e-08,0.395712,0.00141201,-3
.06497e-06,-5.72754e-09,0.397121,0.00140586,-3.08215e-06,1.9301e-08,0.398524,0.00139975,-3.02425e-06,1.7931e-08,0.39992,0.00139376,-2.97046e-06,-1.61822e-09,0.401311,0.00138781,-2.97531e-06,1.83442e-08,0.402696,0.00138192,-2.92028e-06,1.76485e-08,0.404075,0.00137613,-2.86733e-06,4.68617e-10,0.405448,0.00137039,-2.86593e-06,1.02794e-08,0.406816,0.00136469,-2.83509e-06,1.80179e-08,0.408178,0.00135908,-2.78104e-06,7.05594e-09,0.409534,0.00135354,-2.75987e-06,1.33633e-08,0.410885,0.00134806,-2.71978e-06,-9.04568e-10,0.41223,0.00134261,-2.72249e-06,2.0057e-08,0.41357,0.00133723,-2.66232e-06,1.00841e-08,0.414905,0.00133194,-2.63207e-06,-7.88835e-10,0.416234,0.00132667,-2.63444e-06,2.28734e-08,0.417558,0.00132147,-2.56582e-06,-1.29785e-09,0.418877,0.00131633,-2.56971e-06,1.21205e-08,0.420191,0.00131123,-2.53335e-06,1.24202e-08,0.421499,0.0013062,-2.49609e-06,-2.19681e-09,0.422803,0.0013012,-2.50268e-06,2.61696e-08,0.424102,0.00129628,-2.42417e-06,-1.30747e-08,0.425396,0.00129139,-2.46339e-06,2.6129e-08,0.426685,0.00128654,-2.38501e-06,-2.03454e-09,0.427969,0.00128176,-2.39111e-06,1.18115e-08,0.429248,0.00127702,-2.35567e-06,1.43932e-08,0.430523,0.00127235,-2.31249e-06,-9.77965e-09,0.431793,0.00126769,-2.34183e-06,2.47253e-08,0.433058,0.00126308,-2.26766e-06,2.85278e-10,0.434319,0.00125855,-2.2668e-06,3.93614e-09,0.435575,0.00125403,-2.25499e-06,1.37722e-08,0.436827,0.00124956,-2.21368e-06,5.79803e-10,0.438074,0.00124513,-2.21194e-06,1.37112e-08,0.439317,0.00124075,-2.1708e-06,4.17973e-09,0.440556,0.00123642,-2.15826e-06,-6.27703e-10,0.44179,0.0012321,-2.16015e-06,2.81332e-08,0.44302,0.00122787,-2.07575e-06,-2.24985e-08,0.444246,0.00122365,-2.14324e-06,3.20586e-08,0.445467,0.00121946,-2.04707e-06,-1.6329e-08,0.446685,0.00121532,-2.09605e-06,3.32573e-08,0.447898,0.00121122,-1.99628e-06,-2.72927e-08,0.449107,0.00120715,-2.07816e-06,4.6111e-08,0.450312,0.00120313,-1.93983e-06,-3.79416e-08,0.451514,0.00119914,-2.05365e-06,4.60507e-08,0.452711,0.00119517,-1.9155e-06,-2.7052e-08,
0.453904,0.00119126,-1.99666e-06,3.23551e-08,0.455093,0.00118736,-1.89959e-06,-1.29613e-08,0.456279,0.00118352,-1.93848e-06,1.94905e-08,0.45746,0.0011797,-1.88e-06,-5.39588e-09,0.458638,0.00117593,-1.89619e-06,2.09282e-09,0.459812,0.00117214,-1.88991e-06,2.68267e-08,0.460982,0.00116844,-1.80943e-06,-1.99925e-08,0.462149,0.00116476,-1.86941e-06,2.3341e-08,0.463312,0.00116109,-1.79939e-06,-1.37674e-08,0.464471,0.00115745,-1.84069e-06,3.17287e-08,0.465627,0.00115387,-1.7455e-06,-2.37407e-08,0.466779,0.00115031,-1.81673e-06,3.34315e-08,0.467927,0.00114677,-1.71643e-06,-2.05786e-08,0.469073,0.00114328,-1.77817e-06,1.90802e-08,0.470214,0.00113978,-1.72093e-06,3.86247e-09,0.471352,0.00113635,-1.70934e-06,-4.72759e-09,0.472487,0.00113292,-1.72352e-06,1.50478e-08,0.473618,0.00112951,-1.67838e-06,4.14108e-09,0.474746,0.00112617,-1.66595e-06,-1.80986e-09,0.47587,0.00112283,-1.67138e-06,3.09816e-09,0.476991,0.0011195,-1.66209e-06,1.92198e-08,0.478109,0.00111623,-1.60443e-06,-2.03726e-08,0.479224,0.00111296,-1.66555e-06,3.2468e-08,0.480335,0.00110973,-1.56814e-06,-2.00922e-08,0.481443,0.00110653,-1.62842e-06,1.80983e-08,0.482548,0.00110333,-1.57413e-06,7.30362e-09,0.48365,0.0011002,-1.55221e-06,-1.75107e-08,0.484749,0.00109705,-1.60475e-06,3.29373e-08,0.485844,0.00109393,-1.50594e-06,-2.48315e-08,0.486937,0.00109085,-1.58043e-06,3.65865e-08,0.488026,0.0010878,-1.47067e-06,-3.21078e-08,0.489112,0.00108476,-1.56699e-06,3.22397e-08,0.490195,0.00108172,-1.47027e-06,-7.44391e-09,0.491276,0.00107876,-1.49261e-06,-2.46428e-09,0.492353,0.00107577,-1.5e-06,1.73011e-08,0.493427,0.00107282,-1.4481e-06,-7.13552e-09,0.494499,0.0010699,-1.4695e-06,1.1241e-08,0.495567,0.001067,-1.43578e-06,-8.02637e-09,0.496633,0.0010641,-1.45986e-06,2.08645e-08,0.497695,0.00106124,-1.39726e-06,-1.58271e-08,0.498755,0.0010584,-1.44475e-06,1.26415e-08,0.499812,0.00105555,-1.40682e-06,2.48655e-08,0.500866,0.00105281,-1.33222e-06,-5.24988e-08,0.501918,0.00104999,-1.48972e-06,6.59206e-08,0.502966,0.00104721,-1.291
96e-06,-3.237e-08,0.504012,0.00104453,-1.38907e-06,3.95479e-09,0.505055,0.00104176,-1.3772e-06,1.65509e-08,0.506096,0.00103905,-1.32755e-06,-1.05539e-08,0.507133,0.00103637,-1.35921e-06,2.56648e-08,0.508168,0.00103373,-1.28222e-06,-3.25007e-08,0.509201,0.00103106,-1.37972e-06,4.47336e-08,0.51023,0.00102844,-1.24552e-06,-2.72245e-08,0.511258,0.00102587,-1.32719e-06,4.55952e-09,0.512282,0.00102323,-1.31352e-06,8.98645e-09,0.513304,0.00102063,-1.28656e-06,1.90992e-08,0.514323,0.00101811,-1.22926e-06,-2.57786e-08,0.51534,0.00101557,-1.30659e-06,2.44104e-08,0.516355,0.00101303,-1.23336e-06,-1.22581e-08,0.517366,0.00101053,-1.27014e-06,2.4622e-08,0.518376,0.00100806,-1.19627e-06,-2.66253e-08,0.519383,0.00100559,-1.27615e-06,2.22744e-08,0.520387,0.00100311,-1.20932e-06,-2.8679e-09,0.521389,0.00100068,-1.21793e-06,-1.08029e-08,0.522388,0.000998211,-1.25034e-06,4.60795e-08,0.523385,0.000995849,-1.1121e-06,-5.4306e-08,0.52438,0.000993462,-1.27502e-06,5.19354e-08,0.525372,0.000991067,-1.11921e-06,-3.42262e-08,0.526362,0.000988726,-1.22189e-06,2.53646e-08,0.52735,0.000986359,-1.14579e-06,-7.62782e-09,0.528335,0.000984044,-1.16868e-06,5.14668e-09,0.529318,0.000981722,-1.15324e-06,-1.29589e-08,0.530298,0.000979377,-1.19211e-06,4.66888e-08,0.531276,0.000977133,-1.05205e-06,-5.45868e-08,0.532252,0.000974865,-1.21581e-06,5.24495e-08,0.533226,0.000972591,-1.05846e-06,-3.60019e-08,0.534198,0.000970366,-1.16647e-06,3.19537e-08,0.535167,0.000968129,-1.07061e-06,-3.2208e-08,0.536134,0.000965891,-1.16723e-06,3.72738e-08,0.537099,0.000963668,-1.05541e-06,2.32205e-09,0.538061,0.000961564,-1.04844e-06,-4.65618e-08,0.539022,0.000959328,-1.18813e-06,6.47159e-08,0.53998,0.000957146,-9.93979e-07,-3.3488e-08,0.540936,0.000955057,-1.09444e-06,9.63166e-09,0.54189,0.000952897,-1.06555e-06,-5.03871e-09,0.542842,0.000950751,-1.08066e-06,1.05232e-08,0.543792,0.000948621,-1.04909e-06,2.25503e-08,0.544739,0.000946591,-9.81444e-07,-4.11195e-08,0.545685,0.000944504,-1.1048e-06,2.27182e-08,0.546628,0.000942
363,-1.03665e-06,9.85146e-09,0.54757,0.000940319,-1.00709e-06,-2.51938e-09,0.548509,0.000938297,-1.01465e-06,2.25858e-10,0.549446,0.000936269,-1.01397e-06,1.61598e-09,0.550381,0.000934246,-1.00913e-06,-6.68983e-09,0.551315,0.000932207,-1.0292e-06,2.51434e-08,0.552246,0.000930224,-9.53765e-07,-3.42793e-08,0.553175,0.000928214,-1.0566e-06,5.23688e-08,0.554102,0.000926258,-8.99497e-07,-5.59865e-08,0.555028,0.000924291,-1.06746e-06,5.23679e-08,0.555951,0.000922313,-9.10352e-07,-3.42763e-08,0.556872,0.00092039,-1.01318e-06,2.51326e-08,0.557792,0.000918439,-9.37783e-07,-6.64954e-09,0.558709,0.000916543,-9.57732e-07,1.46554e-09,0.559625,0.000914632,-9.53335e-07,7.87281e-10,0.560538,0.000912728,-9.50973e-07,-4.61466e-09,0.56145,0.000910812,-9.64817e-07,1.76713e-08,0.56236,0.000908935,-9.11804e-07,-6.46564e-09,0.563268,0.000907092,-9.312e-07,8.19121e-09,0.564174,0.000905255,-9.06627e-07,-2.62992e-08,0.565078,0.000903362,-9.85524e-07,3.74007e-08,0.565981,0.000901504,-8.73322e-07,-4.0942e-09,0.566882,0.000899745,-8.85605e-07,-2.1024e-08,0.56778,0.00089791,-9.48677e-07,2.85854e-08,0.568677,0.000896099,-8.62921e-07,-3.3713e-08,0.569573,0.000894272,-9.64059e-07,4.6662e-08,0.570466,0.000892484,-8.24073e-07,-3.37258e-08,0.571358,0.000890734,-9.25251e-07,2.86365e-08,0.572247,0.00088897,-8.39341e-07,-2.12155e-08,0.573135,0.000887227,-9.02988e-07,-3.37913e-09,0.574022,0.000885411,-9.13125e-07,3.47319e-08,0.574906,0.000883689,-8.08929e-07,-1.63394e-08,0.575789,0.000882022,-8.57947e-07,-2.8979e-08,0.57667,0.00088022,-9.44885e-07,7.26509e-08,0.57755,0.000878548,-7.26932e-07,-8.28106e-08,0.578427,0.000876845,-9.75364e-07,7.97774e-08,0.579303,0.000875134,-7.36032e-07,-5.74849e-08,0.580178,0.00087349,-9.08486e-07,3.09529e-08,0.58105,0.000871765,-8.15628e-07,-6.72206e-09,0.581921,0.000870114,-8.35794e-07,-4.06451e-09,0.582791,0.00086843,-8.47987e-07,2.29799e-08,0.583658,0.000866803,-7.79048e-07,-2.82503e-08,0.584524,0.00086516,-8.63799e-07,3.04167e-08,0.585388,0.000863524,-7.72548e-07,-3.381
19e-08,0.586251,0.000861877,-8.73984e-07,4.52264e-08,0.587112,0.000860265,-7.38305e-07,-2.78842e-08,0.587972,0.000858705,-8.21958e-07,6.70567e-09,0.58883,0.000857081,-8.01841e-07,1.06161e-09,0.589686,0.000855481,-7.98656e-07,-1.09521e-08,0.590541,0.00085385,-8.31512e-07,4.27468e-08,0.591394,0.000852316,-7.03272e-07,-4.08257e-08,0.592245,0.000850787,-8.25749e-07,1.34677e-09,0.593095,0.000849139,-8.21709e-07,3.54387e-08,0.593944,0.000847602,-7.15393e-07,-2.38924e-08,0.59479,0.0008461,-7.8707e-07,5.26143e-10,0.595636,0.000844527,-7.85491e-07,2.17879e-08,0.596479,0.000843021,-7.20127e-07,-2.80733e-08,0.597322,0.000841497,-8.04347e-07,3.09005e-08,0.598162,0.000839981,-7.11646e-07,-3.5924e-08,0.599002,0.00083845,-8.19418e-07,5.3191e-08,0.599839,0.000836971,-6.59845e-07,-5.76307e-08,0.600676,0.000835478,-8.32737e-07,5.81227e-08,0.60151,0.000833987,-6.58369e-07,-5.56507e-08,0.602344,0.000832503,-8.25321e-07,4.52706e-08,0.603175,0.000830988,-6.89509e-07,-6.22236e-09,0.604006,0.000829591,-7.08176e-07,-2.03811e-08,0.604834,0.000828113,-7.6932e-07,2.8142e-08,0.605662,0.000826659,-6.84894e-07,-3.25822e-08,0.606488,0.000825191,-7.8264e-07,4.25823e-08,0.607312,0.000823754,-6.54893e-07,-1.85376e-08,0.608135,0.000822389,-7.10506e-07,-2.80365e-08,0.608957,0.000820883,-7.94616e-07,7.1079e-08,0.609777,0.000819507,-5.81379e-07,-7.74655e-08,0.610596,0.000818112,-8.13775e-07,5.9969e-08,0.611413,0.000816665,-6.33868e-07,-4.32013e-08,0.612229,0.000815267,-7.63472e-07,5.32313e-08,0.613044,0.0008139,-6.03778e-07,-5.05148e-08,0.613857,0.000812541,-7.55323e-07,2.96187e-08,0.614669,0.000811119,-6.66466e-07,-8.35545e-09,0.615479,0.000809761,-6.91533e-07,3.80301e-09,0.616288,0.00080839,-6.80124e-07,-6.85666e-09,0.617096,0.000807009,-7.00694e-07,2.36237e-08,0.617903,0.000805678,-6.29822e-07,-2.80336e-08,0.618708,0.000804334,-7.13923e-07,2.8906e-08,0.619511,0.000802993,-6.27205e-07,-2.79859e-08,0.620314,0.000801655,-7.11163e-07,2.34329e-08,0.621114,0.000800303,-6.40864e-07,-6.14108e-09,0.621914,0.00
0799003,-6.59287e-07,1.13151e-09,0.622712,0.000797688,-6.55893e-07,1.61507e-09,0.62351,0.000796381,-6.51048e-07,-7.59186e-09,0.624305,0.000795056,-6.73823e-07,2.87524e-08,0.6251,0.000793794,-5.87566e-07,-4.7813e-08,0.625893,0.000792476,-7.31005e-07,4.32901e-08,0.626685,0.000791144,-6.01135e-07,-6.13814e-09,0.627475,0.000789923,-6.19549e-07,-1.87376e-08,0.628264,0.000788628,-6.75762e-07,2.14837e-08,0.629052,0.000787341,-6.11311e-07,-7.59265e-09,0.629839,0.000786095,-6.34089e-07,8.88692e-09,0.630625,0.000784854,-6.07428e-07,-2.7955e-08,0.631409,0.000783555,-6.91293e-07,4.33285e-08,0.632192,0.000782302,-5.61307e-07,-2.61497e-08,0.632973,0.000781101,-6.39757e-07,1.6658e-09,0.633754,0.000779827,-6.34759e-07,1.94866e-08,0.634533,0.000778616,-5.76299e-07,-2.00076e-08,0.635311,0.000777403,-6.36322e-07,9.39091e-10,0.636088,0.000776133,-6.33505e-07,1.62512e-08,0.636863,0.000774915,-5.84751e-07,-6.33937e-09,0.637638,0.000773726,-6.03769e-07,9.10609e-09,0.638411,0.000772546,-5.76451e-07,-3.00849e-08,0.639183,0.000771303,-6.66706e-07,5.1629e-08,0.639953,0.000770125,-5.11819e-07,-5.7222e-08,0.640723,0.000768929,-6.83485e-07,5.80497e-08,0.641491,0.000767736,-5.09336e-07,-5.57674e-08,0.642259,0.000766551,-6.76638e-07,4.58105e-08,0.643024,0.000765335,-5.39206e-07,-8.26541e-09,0.643789,0.000764231,-5.64002e-07,-1.27488e-08,0.644553,0.000763065,-6.02249e-07,-3.44168e-10,0.645315,0.00076186,-6.03281e-07,1.41254e-08,0.646077,0.000760695,-5.60905e-07,3.44727e-09,0.646837,0.000759584,-5.50563e-07,-2.79144e-08,0.647596,0.000758399,-6.34307e-07,4.86057e-08,0.648354,0.000757276,-4.88489e-07,-4.72989e-08,0.64911,0.000756158,-6.30386e-07,2.13807e-08,0.649866,0.000754961,-5.66244e-07,2.13808e-08,0.65062,0.000753893,-5.02102e-07,-4.7299e-08,0.651374,0.000752746,-6.43999e-07,4.86059e-08,0.652126,0.000751604,-4.98181e-07,-2.79154e-08,0.652877,0.000750524,-5.81927e-07,3.45089e-09,0.653627,0.000749371,-5.71575e-07,1.41119e-08,0.654376,0.00074827,-5.29239e-07,-2.93748e-10,0.655123,0.00074721,-5.3012e
-07,-1.29368e-08,0.65587,0.000746111,-5.68931e-07,-7.56355e-09,0.656616,0.000744951,-5.91621e-07,4.3191e-08,0.65736,0.000743897,-4.62048e-07,-4.59911e-08,0.658103,0.000742835,-6.00022e-07,2.15642e-08,0.658846,0.0007417,-5.35329e-07,1.93389e-08,0.659587,0.000740687,-4.77312e-07,-3.93152e-08,0.660327,0.000739615,-5.95258e-07,1.87126e-08,0.661066,0.00073848,-5.3912e-07,2.40695e-08,0.661804,0.000737474,-4.66912e-07,-5.53859e-08,0.662541,0.000736374,-6.33069e-07,7.82648e-08,0.663277,0.000735343,-3.98275e-07,-7.88593e-08,0.664012,0.00073431,-6.34853e-07,5.83585e-08,0.664745,0.000733215,-4.59777e-07,-3.53656e-08,0.665478,0.000732189,-5.65874e-07,2.34994e-08,0.66621,0.000731128,-4.95376e-07,9.72743e-10,0.66694,0.00073014,-4.92458e-07,-2.73903e-08,0.66767,0.000729073,-5.74629e-07,4.89839e-08,0.668398,0.000728071,-4.27677e-07,-4.93359e-08,0.669126,0.000727068,-5.75685e-07,2.91504e-08,0.669853,0.000726004,-4.88234e-07,-7.66109e-09,0.670578,0.000725004,-5.11217e-07,1.49392e-09,0.671303,0.000723986,-5.06735e-07,1.68533e-09,0.672026,0.000722978,-5.01679e-07,-8.23525e-09,0.672749,0.00072195,-5.26385e-07,3.12556e-08,0.67347,0.000720991,-4.32618e-07,-5.71825e-08,0.674191,0.000719954,-6.04166e-07,7.8265e-08,0.67491,0.00071898,-3.69371e-07,-7.70634e-08,0.675628,0.00071801,-6.00561e-07,5.11747e-08,0.676346,0.000716963,-4.47037e-07,-8.42615e-09,0.677062,0.000716044,-4.72315e-07,-1.747e-08,0.677778,0.000715046,-5.24725e-07,1.87015e-08,0.678493,0.000714053,-4.68621e-07,2.26856e-09,0.679206,0.000713123,-4.61815e-07,-2.77758e-08,0.679919,0.000712116,-5.45142e-07,4.92298e-08,0.68063,0.000711173,-3.97453e-07,-4.99339e-08,0.681341,0.000710228,-5.47255e-07,3.12967e-08,0.682051,0.000709228,-4.53365e-07,-1.56481e-08,0.68276,0.000708274,-5.00309e-07,3.12958e-08,0.683467,0.000707367,-4.06422e-07,-4.99303e-08,0.684174,0.000706405,-5.56213e-07,4.9216e-08,0.68488,0.00070544,-4.08565e-07,-2.77245e-08,0.685585,0.00070454,-4.91738e-07,2.07748e-09,0.686289,0.000703562,-4.85506e-07,1.94146e-08,0.686992,0.0
0070265,-4.27262e-07,-2.01314e-08,0.687695,0.000701735,-4.87656e-07,1.50616e-09,0.688396,0.000700764,-4.83137e-07,1.41067e-08,0.689096,0.00069984,-4.40817e-07,1.67168e-09,0.689795,0.000698963,-4.35802e-07,-2.07934e-08,0.690494,0.000698029,-4.98182e-07,2.18972e-08,0.691192,0.000697099,-4.32491e-07,-7.19092e-09,0.691888,0.000696212,-4.54064e-07,6.86642e-09,0.692584,0.000695325,-4.33464e-07,-2.02747e-08,0.693279,0.000694397,-4.94288e-07,1.46279e-08,0.693973,0.000693452,-4.50405e-07,2.13678e-08,0.694666,0.000692616,-3.86301e-07,-4.04945e-08,0.695358,0.000691721,-5.07785e-07,2.14009e-08,0.696049,0.00069077,-4.43582e-07,1.44955e-08,0.69674,0.000689926,-4.00096e-07,-1.97783e-08,0.697429,0.000689067,-4.5943e-07,5.01296e-09,0.698118,0.000688163,-4.44392e-07,-2.73521e-10,0.698805,0.000687273,-4.45212e-07,-3.91893e-09,0.699492,0.000686371,-4.56969e-07,1.59493e-08,0.700178,0.000685505,-4.09121e-07,-2.73351e-10,0.700863,0.000684686,-4.09941e-07,-1.4856e-08,0.701548,0.000683822,-4.54509e-07,9.25979e-11,0.702231,0.000682913,-4.54231e-07,1.44855e-08,0.702913,0.000682048,-4.10775e-07,1.56992e-09,0.703595,0.000681231,-4.06065e-07,-2.07652e-08,0.704276,0.000680357,-4.68361e-07,2.18864e-08,0.704956,0.000679486,-4.02701e-07,-7.17595e-09,0.705635,0.000678659,-4.24229e-07,6.81748e-09,0.706313,0.000677831,-4.03777e-07,-2.0094e-08,0.70699,0.000676963,-4.64059e-07,1.39538e-08,0.707667,0.000676077,-4.22197e-07,2.38835e-08,0.708343,0.000675304,-3.50547e-07,-4.98831e-08,0.709018,0.000674453,-5.00196e-07,5.64395e-08,0.709692,0.000673622,-3.30878e-07,-5.66657e-08,0.710365,0.00067279,-5.00875e-07,5.1014e-08,0.711037,0.000671942,-3.47833e-07,-2.81809e-08,0.711709,0.000671161,-4.32376e-07,2.10513e-09,0.712379,0.000670303,-4.2606e-07,1.97604e-08,0.713049,0.00066951,-3.66779e-07,-2.15422e-08,0.713718,0.000668712,-4.31406e-07,6.8038e-09,0.714387,0.000667869,-4.10994e-07,-5.67295e-09,0.715054,0.00066703,-4.28013e-07,1.5888e-08,0.715721,0.000666222,-3.80349e-07,1.72576e-09,0.716387,0.000665467,-3.75172e-
07,-2.27911e-08,0.717052,0.000664648,-4.43545e-07,2.9834e-08,0.717716,0.00066385,-3.54043e-07,-3.69401e-08,0.718379,0.000663031,-4.64864e-07,5.83219e-08,0.719042,0.000662277,-2.89898e-07,-7.71382e-08,0.719704,0.000661465,-5.21313e-07,7.14171e-08,0.720365,0.000660637,-3.07061e-07,-2.97161e-08,0.721025,0.000659934,-3.96209e-07,-1.21575e-08,0.721685,0.000659105,-4.32682e-07,1.87412e-08,0.722343,0.000658296,-3.76458e-07,-3.2029e-09,0.723001,0.000657533,-3.86067e-07,-5.9296e-09,0.723659,0.000656743,-4.03856e-07,2.69213e-08,0.724315,0.000656016,-3.23092e-07,-4.21511e-08,0.724971,0.000655244,-4.49545e-07,2.24737e-08,0.725625,0.000654412,-3.82124e-07,1.18611e-08,0.726279,0.000653683,-3.46541e-07,-1.03132e-08,0.726933,0.000652959,-3.7748e-07,-3.02128e-08,0.727585,0.000652114,-4.68119e-07,7.15597e-08,0.728237,0.000651392,-2.5344e-07,-7.72119e-08,0.728888,0.000650654,-4.85075e-07,5.8474e-08,0.729538,0.000649859,-3.09654e-07,-3.74746e-08,0.730188,0.000649127,-4.22077e-07,3.18197e-08,0.730837,0.000648379,-3.26618e-07,-3.01997e-08,0.731485,0.000647635,-4.17217e-07,2.93747e-08,0.732132,0.000646888,-3.29093e-07,-2.76943e-08,0.732778,0.000646147,-4.12176e-07,2.17979e-08,0.733424,0.000645388,-3.46783e-07,1.07292e-10,0.734069,0.000644695,-3.46461e-07,-2.22271e-08,0.734713,0.000643935,-4.13142e-07,2.91963e-08,0.735357,0.000643197,-3.25553e-07,-3.49536e-08,0.736,0.000642441,-4.30414e-07,5.10133e-08,0.736642,0.000641733,-2.77374e-07,-4.98904e-08,0.737283,0.000641028,-4.27045e-07,2.93392e-08,0.737924,0.000640262,-3.39028e-07,-7.86156e-09,0.738564,0.000639561,-3.62612e-07,2.10703e-09,0.739203,0.000638842,-3.56291e-07,-5.6653e-10,0.739842,0.000638128,-3.57991e-07,1.59086e-10,0.740479,0.000637412,-3.57513e-07,-6.98321e-11,0.741116,0.000636697,-3.57723e-07,1.20214e-10,0.741753,0.000635982,-3.57362e-07,-4.10987e-10,0.742388,0.000635266,-3.58595e-07,1.5237e-09,0.743023,0.000634553,-3.54024e-07,-5.68376e-09,0.743657,0.000633828,-3.71075e-07,2.12113e-08,0.744291,0.00063315,-3.07441e-07,-1.95569e-
08,0.744924,0.000632476,-3.66112e-07,-2.58816e-09,0.745556,0.000631736,-3.73877e-07,2.99096e-08,0.746187,0.000631078,-2.84148e-07,-5.74454e-08,0.746818,0.000630337,-4.56484e-07,8.06629e-08,0.747448,0.000629666,-2.14496e-07,-8.63922e-08,0.748077,0.000628978,-4.73672e-07,8.60918e-08,0.748706,0.000628289,-2.15397e-07,-7.91613e-08,0.749334,0.000627621,-4.5288e-07,5.17393e-08,0.749961,0.00062687,-2.97663e-07,-8.58662e-09,0.750588,0.000626249,-3.23422e-07,-1.73928e-08,0.751214,0.00062555,-3.75601e-07,1.85532e-08,0.751839,0.000624855,-3.19941e-07,2.78479e-09,0.752463,0.000624223,-3.11587e-07,-2.96923e-08,0.753087,0.000623511,-4.00664e-07,5.63799e-08,0.75371,0.000622879,-2.31524e-07,-7.66179e-08,0.754333,0.000622186,-4.61378e-07,7.12778e-08,0.754955,0.000621477,-2.47545e-07,-2.96794e-08,0.755576,0.000620893,-3.36583e-07,-1.21648e-08,0.756196,0.000620183,-3.73077e-07,1.87339e-08,0.756816,0.000619493,-3.16875e-07,-3.16622e-09,0.757435,0.00061885,-3.26374e-07,-6.0691e-09,0.758054,0.000618179,-3.44581e-07,2.74426e-08,0.758672,0.000617572,-2.62254e-07,-4.40968e-08,0.759289,0.000616915,-3.94544e-07,2.97352e-08,0.759906,0.000616215,-3.05338e-07,-1.52393e-08,0.760522,0.000615559,-3.51056e-07,3.12221e-08,0.761137,0.000614951,-2.5739e-07,-5.00443e-08,0.761751,0.000614286,-4.07523e-07,4.9746e-08,0.762365,0.00061362,-2.58285e-07,-2.97303e-08,0.762979,0.000613014,-3.47476e-07,9.57079e-09,0.763591,0.000612348,-3.18764e-07,-8.55287e-09,0.764203,0.000611685,-3.44422e-07,2.46407e-08,0.764815,0.00061107,-2.705e-07,-3.04053e-08,0.765426,0.000610437,-3.61716e-07,3.73759e-08,0.766036,0.000609826,-2.49589e-07,-5.94935e-08,0.766645,0.000609149,-4.28069e-07,8.13889e-08,0.767254,0.000608537,-1.83902e-07,-8.72483e-08,0.767862,0.000607907,-4.45647e-07,8.87901e-08,0.76847,0.000607282,-1.79277e-07,-8.90983e-08,0.769077,0.000606656,-4.46572e-07,8.87892e-08,0.769683,0.000606029,-1.80204e-07,-8.72446e-08,0.770289,0.000605407,-4.41938e-07,8.13752e-08,0.770894,0.000604768,-1.97812e-07,-5.94423e-08,0.771498,
0.000604194,-3.76139e-07,3.71848e-08,0.772102,0.000603553,-2.64585e-07,-2.96922e-08,0.772705,0.000602935,-3.53661e-07,2.19793e-08,0.773308,0.000602293,-2.87723e-07,1.37955e-09,0.77391,0.000601722,-2.83585e-07,-2.74976e-08,0.774512,0.000601072,-3.66077e-07,4.9006e-08,0.775112,0.000600487,-2.19059e-07,-4.93171e-08,0.775712,0.000599901,-3.67011e-07,2.90531e-08,0.776312,0.000599254,-2.79851e-07,-7.29081e-09,0.776911,0.000598673,-3.01724e-07,1.10077e-10,0.777509,0.00059807,-3.01393e-07,6.85053e-09,0.778107,0.000597487,-2.80842e-07,-2.75123e-08,0.778704,0.000596843,-3.63379e-07,4.35939e-08,0.779301,0.000596247,-2.32597e-07,-2.7654e-08,0.779897,0.000595699,-3.15559e-07,7.41741e-09,0.780492,0.00059509,-2.93307e-07,-2.01562e-09,0.781087,0.000594497,-2.99354e-07,6.45059e-10,0.781681,0.000593901,-2.97418e-07,-5.64635e-10,0.782275,0.000593304,-2.99112e-07,1.61347e-09,0.782868,0.000592711,-2.94272e-07,-5.88926e-09,0.78346,0.000592105,-3.1194e-07,2.19436e-08,0.784052,0.000591546,-2.46109e-07,-2.22805e-08,0.784643,0.000590987,-3.1295e-07,7.57368e-09,0.785234,0.000590384,-2.90229e-07,-8.01428e-09,0.785824,0.00058978,-3.14272e-07,2.44834e-08,0.786414,0.000589225,-2.40822e-07,-3.03148e-08,0.787003,0.000588652,-3.31766e-07,3.7171e-08,0.787591,0.0005881,-2.20253e-07,-5.87646e-08,0.788179,0.000587483,-3.96547e-07,7.86782e-08,0.788766,0.000586926,-1.60512e-07,-7.71342e-08,0.789353,0.000586374,-3.91915e-07,5.10444e-08,0.789939,0.000585743,-2.38782e-07,-7.83422e-09,0.790524,0.000585242,-2.62284e-07,-1.97076e-08,0.791109,0.000584658,-3.21407e-07,2.70598e-08,0.791693,0.000584097,-2.40228e-07,-2.89269e-08,0.792277,0.000583529,-3.27008e-07,2.90431e-08,0.792861,0.000582963,-2.39879e-07,-2.76409e-08,0.793443,0.0005824,-3.22802e-07,2.1916e-08,0.794025,0.00058182,-2.57054e-07,-4.18368e-10,0.794607,0.000581305,-2.58309e-07,-2.02425e-08,0.795188,0.000580727,-3.19036e-07,2.17838e-08,0.795768,0.000580155,-2.53685e-07,-7.28814e-09,0.796348,0.000579625,-2.75549e-07,7.36871e-09,0.796928,0.000579096,-2.53
443e-07,-2.21867e-08,0.797506,0.000578523,-3.20003e-07,2.17736e-08,0.798085,0.000577948,-2.54683e-07,-5.30296e-09,0.798662,0.000577423,-2.70592e-07,-5.61698e-10,0.799239,0.00057688,-2.72277e-07,7.54977e-09,0.799816,0.000576358,-2.49627e-07,-2.96374e-08,0.800392,0.00057577,-3.38539e-07,5.1395e-08,0.800968,0.000575247,-1.84354e-07,-5.67335e-08,0.801543,0.000574708,-3.54555e-07,5.63297e-08,0.802117,0.000574168,-1.85566e-07,-4.93759e-08,0.802691,0.000573649,-3.33693e-07,2.19646e-08,0.803264,0.000573047,-2.678e-07,2.1122e-08,0.803837,0.000572575,-2.04433e-07,-4.68482e-08,0.804409,0.000572026,-3.44978e-07,4.70613e-08,0.804981,0.000571477,-2.03794e-07,-2.21877e-08,0.805552,0.000571003,-2.70357e-07,-1.79153e-08,0.806123,0.000570408,-3.24103e-07,3.42443e-08,0.806693,0.000569863,-2.2137e-07,1.47556e-10,0.807263,0.000569421,-2.20928e-07,-3.48345e-08,0.807832,0.000568874,-3.25431e-07,1.99812e-08,0.808401,0.000568283,-2.65487e-07,1.45143e-08,0.808969,0.000567796,-2.21945e-07,-1.84338e-08,0.809536,0.000567297,-2.77246e-07,-3.83608e-10,0.810103,0.000566741,-2.78397e-07,1.99683e-08,0.81067,0.000566244,-2.18492e-07,-1.98848e-08,0.811236,0.000565747,-2.78146e-07,-3.38976e-11,0.811801,0.000565191,-2.78248e-07,2.00204e-08,0.812366,0.000564695,-2.18187e-07,-2.04429e-08,0.812931,0.000564197,-2.79516e-07,2.1467e-09,0.813495,0.000563644,-2.73076e-07,1.18561e-08,0.814058,0.000563134,-2.37507e-07,1.00334e-08,0.814621,0.000562689,-2.07407e-07,-5.19898e-08,0.815183,0.000562118,-3.63376e-07,7.87163e-08,0.815745,0.000561627,-1.27227e-07,-8.40616e-08,0.816306,0.000561121,-3.79412e-07,7.87163e-08,0.816867,0.000560598,-1.43263e-07,-5.19898e-08,0.817428,0.000560156,-2.99233e-07,1.00335e-08,0.817988,0.000559587,-2.69132e-07,1.18559e-08,0.818547,0.000559085,-2.33564e-07,2.14764e-09,0.819106,0.000558624,-2.27122e-07,-2.04464e-08,0.819664,0.000558108,-2.88461e-07,2.00334e-08,0.820222,0.000557591,-2.28361e-07,-8.24277e-11,0.820779,0.000557135,-2.28608e-07,-1.97037e-08,0.821336,0.000556618,-2.87719e-07,1.
92925e-08,0.821893,0.000556101,-2.29841e-07,2.13831e-09,0.822448,0.000555647,-2.23427e-07,-2.78458e-08,0.823004,0.000555117,-3.06964e-07,4.96402e-08,0.823559,0.000554652,-1.58043e-07,-5.15058e-08,0.824113,0.000554181,-3.12561e-07,3.71737e-08,0.824667,0.000553668,-2.0104e-07,-3.75844e-08,0.82522,0.000553153,-3.13793e-07,5.35592e-08,0.825773,0.000552686,-1.53115e-07,-5.74431e-08,0.826326,0.000552207,-3.25444e-07,5.7004e-08,0.826878,0.000551728,-1.54433e-07,-5.13635e-08,0.827429,0.000551265,-3.08523e-07,2.92406e-08,0.82798,0.000550735,-2.20801e-07,-5.99424e-09,0.828531,0.000550276,-2.38784e-07,-5.26363e-09,0.829081,0.000549782,-2.54575e-07,2.70488e-08,0.82963,0.000549354,-1.73429e-07,-4.33268e-08,0.83018,0.000548878,-3.03409e-07,2.7049e-08,0.830728,0.000548352,-2.22262e-07,-5.26461e-09,0.831276,0.000547892,-2.38056e-07,-5.99057e-09,0.831824,0.000547397,-2.56027e-07,2.92269e-08,0.832371,0.000546973,-1.68347e-07,-5.13125e-08,0.832918,0.000546482,-3.22284e-07,5.68139e-08,0.833464,0.000546008,-1.51843e-07,-5.67336e-08,0.83401,0.000545534,-3.22043e-07,5.09113e-08,0.834555,0.000545043,-1.6931e-07,-2.77022e-08,0.8351,0.000544621,-2.52416e-07,2.92924e-10,0.835644,0.000544117,-2.51537e-07,2.65305e-08,0.836188,0.000543694,-1.71946e-07,-4.68105e-08,0.836732,0.00054321,-3.12377e-07,4.15021e-08,0.837275,0.000542709,-1.87871e-07,1.13355e-11,0.837817,0.000542334,-1.87837e-07,-4.15474e-08,0.838359,0.000541833,-3.12479e-07,4.69691e-08,0.838901,0.000541349,-1.71572e-07,-2.71196e-08,0.839442,0.000540925,-2.52931e-07,1.90462e-09,0.839983,0.000540425,-2.47217e-07,1.95011e-08,0.840523,0.000539989,-1.88713e-07,-2.03045e-08,0.841063,0.00053955,-2.49627e-07,2.11216e-09,0.841602,0.000539057,-2.4329e-07,1.18558e-08,0.842141,0.000538606,-2.07723e-07,1.00691e-08,0.842679,0.000538221,-1.77516e-07,-5.21324e-08,0.843217,0.00053771,-3.33913e-07,7.92513e-08,0.843755,0.00053728,-9.6159e-08,-8.60587e-08,0.844292,0.000536829,-3.54335e-07,8.61696e-08,0.844828,0.000536379,-9.58263e-08,-7.98057e-08,0.845364,
0.000535948,-3.35243e-07,5.42394e-08,0.8459,0.00053544,-1.72525e-07,-1.79426e-08,0.846435,0.000535041,-2.26353e-07,1.75308e-08,0.84697,0.000534641,-1.73761e-07,-5.21806e-08,0.847505,0.000534137,-3.30302e-07,7.19824e-08,0.848038,0.000533692,-1.14355e-07,-5.69349e-08,0.848572,0.000533293,-2.8516e-07,3.65479e-08,0.849105,0.000532832,-1.75516e-07,-2.96519e-08,0.849638,0.000532392,-2.64472e-07,2.2455e-08,0.85017,0.000531931,-1.97107e-07,-5.63451e-10,0.850702,0.000531535,-1.98797e-07,-2.02011e-08,0.851233,0.000531077,-2.59401e-07,2.17634e-08,0.851764,0.000530623,-1.94111e-07,-7.24794e-09,0.852294,0.000530213,-2.15854e-07,7.22832e-09,0.852824,0.000529803,-1.94169e-07,-2.16653e-08,0.853354,0.00052935,-2.59165e-07,1.98283e-08,0.853883,0.000528891,-1.9968e-07,1.95678e-09,0.854412,0.000528497,-1.9381e-07,-2.76554e-08,0.85494,0.000528027,-2.76776e-07,4.90603e-08,0.855468,0.00052762,-1.29596e-07,-4.93764e-08,0.855995,0.000527213,-2.77725e-07,2.92361e-08,0.856522,0.000526745,-1.90016e-07,-7.96341e-09,0.857049,0.000526341,-2.13907e-07,2.61752e-09,0.857575,0.000525922,-2.06054e-07,-2.50665e-09,0.8581,0.000525502,-2.13574e-07,7.40906e-09,0.858626,0.000525097,-1.91347e-07,-2.71296e-08,0.859151,0.000524633,-2.72736e-07,4.15048e-08,0.859675,0.000524212,-1.48221e-07,-1.96802e-08,0.860199,0.000523856,-2.07262e-07,-2.23886e-08,0.860723,0.000523375,-2.74428e-07,4.96299e-08,0.861246,0.000522975,-1.25538e-07,-5.69216e-08,0.861769,0.000522553,-2.96303e-07,5.88473e-08,0.862291,0.000522137,-1.19761e-07,-5.92584e-08,0.862813,0.00052172,-2.97536e-07,5.8977e-08,0.863334,0.000521301,-1.20605e-07,-5.74403e-08,0.863855,0.000520888,-2.92926e-07,5.15751e-08,0.864376,0.000520457,-1.38201e-07,-2.96506e-08,0.864896,0.000520091,-2.27153e-07,7.42277e-09,0.865416,0.000519659,-2.04885e-07,-4.05057e-11,0.865936,0.00051925,-2.05006e-07,-7.26074e-09,0.866455,0.000518818,-2.26788e-07,2.90835e-08,0.866973,0.000518451,-1.39538e-07,-4.94686e-08,0.867492,0.000518024,-2.87944e-07,4.95814e-08,0.868009,0.000517597,-1.39
199e-07,-2.96479e-08,0.868527,0.000517229,-2.28143e-07,9.40539e-09,0.869044,0.000516801,-1.99927e-07,-7.9737e-09,0.86956,0.000516378,-2.23848e-07,2.24894e-08,0.870077,0.000515997,-1.5638e-07,-2.23793e-08,0.870592,0.000515617,-2.23517e-07,7.42302e-09,0.871108,0.000515193,-2.01248e-07,-7.31283e-09,0.871623,0.000514768,-2.23187e-07,2.18283e-08,0.872137,0.000514387,-1.57702e-07,-2.03959e-08,0.872652,0.000514011,-2.1889e-07,1.50711e-10,0.873165,0.000513573,-2.18437e-07,1.97931e-08,0.873679,0.000513196,-1.59058e-07,-1.97183e-08,0.874192,0.000512819,-2.18213e-07,-5.24324e-10,0.874704,0.000512381,-2.19786e-07,2.18156e-08,0.875217,0.000512007,-1.54339e-07,-2.71336e-08,0.875728,0.000511616,-2.3574e-07,2.71141e-08,0.87624,0.000511226,-1.54398e-07,-2.17182e-08,0.876751,0.000510852,-2.19552e-07,1.54131e-10,0.877262,0.000510414,-2.1909e-07,2.11017e-08,0.877772,0.000510039,-1.55785e-07,-2.49562e-08,0.878282,0.000509652,-2.30654e-07,1.91183e-08,0.878791,0.000509248,-1.73299e-07,8.08751e-09,0.8793,0.000508926,-1.49036e-07,-5.14684e-08,0.879809,0.000508474,-3.03441e-07,7.85766e-08,0.880317,0.000508103,-6.77112e-08,-8.40242e-08,0.880825,0.000507715,-3.19784e-07,7.87063e-08,0.881333,0.000507312,-8.36649e-08,-5.19871e-08,0.88184,0.000506988,-2.39626e-07,1.00327e-08,0.882346,0.000506539,-2.09528e-07,1.18562e-08,0.882853,0.000506156,-1.73959e-07,2.14703e-09,0.883359,0.000505814,-1.67518e-07,-2.04444e-08,0.883864,0.000505418,-2.28851e-07,2.00258e-08,0.88437,0.00050502,-1.68774e-07,-5.42855e-11,0.884874,0.000504682,-1.68937e-07,-1.98087e-08,0.885379,0.000504285,-2.28363e-07,1.96842e-08,0.885883,0.000503887,-1.6931e-07,6.76342e-10,0.886387,0.000503551,-1.67281e-07,-2.23896e-08,0.88689,0.000503149,-2.3445e-07,2.92774e-08,0.887393,0.000502768,-1.46618e-07,-3.51152e-08,0.887896,0.00050237,-2.51963e-07,5.15787e-08,0.888398,0.00050202,-9.72271e-08,-5.19903e-08,0.8889,0.00050167,-2.53198e-07,3.71732e-08,0.889401,0.000501275,-1.41678e-07,-3.70978e-08,0.889902,0.00050088,-2.52972e-07,5.16132e-08,0.8
90403,0.000500529,-9.81321e-08,-5.01459e-08,0.890903,0.000500183,-2.4857e-07,2.9761e-08,0.891403,0.000499775,-1.59287e-07,-9.29351e-09,0.891903,0.000499428,-1.87167e-07,7.41301e-09,0.892402,0.000499076,-1.64928e-07,-2.03585e-08,0.892901,0.000498685,-2.26004e-07,1.44165e-08,0.893399,0.000498276,-1.82754e-07,2.22974e-08,0.893898,0.000497978,-1.15862e-07,-4.40013e-08,0.894395,0.000497614,-2.47866e-07,3.44985e-08,0.894893,0.000497222,-1.44371e-07,-3.43882e-08,0.89539,0.00049683,-2.47535e-07,4.34497e-08,0.895886,0.000496465,-1.17186e-07,-2.02012e-08,0.896383,0.00049617,-1.7779e-07,-2.22497e-08,0.896879,0.000495748,-2.44539e-07,4.95952e-08,0.897374,0.000495408,-9.57532e-08,-5.69217e-08,0.89787,0.000495045,-2.66518e-07,5.88823e-08,0.898364,0.000494689,-8.98713e-08,-5.93983e-08,0.898859,0.000494331,-2.68066e-07,5.95017e-08,0.899353,0.000493973,-8.95613e-08,-5.9399e-08,0.899847,0.000493616,-2.67758e-07,5.8885e-08,0.90034,0.000493257,-9.11033e-08,-5.69317e-08,0.900833,0.000492904,-2.61898e-07,4.96326e-08,0.901326,0.000492529,-1.13001e-07,-2.23893e-08,0.901819,0.000492236,-1.80169e-07,-1.968e-08,0.902311,0.000491817,-2.39209e-07,4.15047e-08,0.902802,0.000491463,-1.14694e-07,-2.71296e-08,0.903293,0.000491152,-1.96083e-07,7.409e-09,0.903784,0.000490782,-1.73856e-07,-2.50645e-09,0.904275,0.000490427,-1.81376e-07,2.61679e-09,0.904765,0.000490072,-1.73525e-07,-7.96072e-09,0.905255,0.000489701,-1.97407e-07,2.92261e-08,0.905745,0.000489394,-1.09729e-07,-4.93389e-08,0.906234,0.000489027,-2.57746e-07,4.89204e-08,0.906723,0.000488658,-1.10985e-07,-2.71333e-08,0.907211,0.000488354,-1.92385e-07,8.30861e-12,0.907699,0.00048797,-1.9236e-07,2.71001e-08,0.908187,0.000487666,-1.1106e-07,-4.88041e-08,0.908675,0.000487298,-2.57472e-07,4.89069e-08,0.909162,0.000486929,-1.10751e-07,-2.76143e-08,0.909649,0.000486625,-1.93594e-07,1.9457e-09,0.910135,0.000486244,-1.87757e-07,1.98315e-08,0.910621,0.000485928,-1.28262e-07,-2.16671e-08,0.911107,0.000485606,-1.93264e-07,7.23216e-09,0.911592,0.000485241,-
1.71567e-07,-7.26152e-09,0.912077,0.000484877,-1.93352e-07,2.18139e-08,0.912562,0.000484555,-1.2791e-07,-2.03895e-08,0.913047,0.000484238,-1.89078e-07,1.39494e-10,0.913531,0.000483861,-1.8866e-07,1.98315e-08,0.914014,0.000483543,-1.29165e-07,-1.98609e-08,0.914498,0.000483225,-1.88748e-07,7.39912e-12,0.914981,0.000482847,-1.88726e-07,1.98313e-08,0.915463,0.000482529,-1.29232e-07,-1.9728e-08,0.915946,0.000482212,-1.88416e-07,-5.24035e-10,0.916428,0.000481833,-1.89988e-07,2.18241e-08,0.916909,0.000481519,-1.24516e-07,-2.71679e-08,0.917391,0.000481188,-2.06019e-07,2.72427e-08,0.917872,0.000480858,-1.24291e-07,-2.21985e-08,0.918353,0.000480543,-1.90886e-07,1.94644e-09,0.918833,0.000480167,-1.85047e-07,1.44127e-08,0.919313,0.00047984,-1.41809e-07,7.39438e-12,0.919793,0.000479556,-1.41787e-07,-1.44423e-08,0.920272,0.000479229,-1.85114e-07,-1.84291e-09,0.920751,0.000478854,-1.90642e-07,2.18139e-08,0.92123,0.000478538,-1.25201e-07,-2.58081e-08,0.921708,0.00047821,-2.02625e-07,2.18139e-08,0.922186,0.00047787,-1.37183e-07,-1.84291e-09,0.922664,0.00047759,-1.42712e-07,-1.44423e-08,0.923141,0.000477262,-1.86039e-07,7.34701e-12,0.923618,0.00047689,-1.86017e-07,1.44129e-08,0.924095,0.000476561,-1.42778e-07,1.94572e-09,0.924572,0.000476281,-1.36941e-07,-2.21958e-08,0.925048,0.000475941,-2.03528e-07,2.72327e-08,0.925523,0.000475615,-1.2183e-07,-2.71304e-08,0.925999,0.00047529,-2.03221e-07,2.16843e-08,0.926474,0.000474949,-1.38168e-07,-2.16005e-12,0.926949,0.000474672,-1.38175e-07,-2.16756e-08,0.927423,0.000474331,-2.03202e-07,2.71001e-08,0.927897,0.000474006,-1.21902e-07,-2.71201e-08,0.928371,0.000473681,-2.03262e-07,2.17757e-08,0.928845,0.00047334,-1.37935e-07,-3.78028e-10,0.929318,0.000473063,-1.39069e-07,-2.02636e-08,0.929791,0.000472724,-1.9986e-07,2.18276e-08,0.930263,0.000472389,-1.34377e-07,-7.44231e-09,0.930736,0.000472098,-1.56704e-07,7.94165e-09,0.931208,0.000471809,-1.32879e-07,-2.43243e-08,0.931679,0.00047147,-2.05851e-07,2.97508e-08,0.932151,0.000471148,-1.16599e-07,-3.
50742e-08,0.932622,0.000470809,-2.21822e-07,5.09414e-08,0.933092,0.000470518,-6.89976e-08,-4.94821e-08,0.933563,0.000470232,-2.17444e-07,2.77775e-08,0.934033,0.00046988,-1.34111e-07,-2.02351e-09,0.934502,0.000469606,-1.40182e-07,-1.96835e-08,0.934972,0.000469267,-1.99232e-07,2.11529e-08,0.935441,0.000468932,-1.35774e-07,-5.32332e-09,0.93591,0.000468644,-1.51743e-07,1.40413e-10,0.936378,0.000468341,-1.51322e-07,4.76166e-09,0.936846,0.000468053,-1.37037e-07,-1.9187e-08,0.937314,0.000467721,-1.94598e-07,1.23819e-08,0.937782,0.000467369,-1.57453e-07,2.92642e-08,0.938249,0.000467142,-6.96601e-08,-6.98342e-08,0.938716,0.000466793,-2.79163e-07,7.12586e-08,0.939183,0.000466449,-6.53869e-08,-3.63863e-08,0.939649,0.000466209,-1.74546e-07,1.46818e-08,0.940115,0.000465904,-1.305e-07,-2.2341e-08,0.940581,0.000465576,-1.97523e-07,1.50774e-08,0.941046,0.000465226,-1.52291e-07,2.16359e-08,0.941511,0.000464986,-8.73832e-08,-4.20162e-08,0.941976,0.000464685,-2.13432e-07,2.72198e-08,0.942441,0.00046434,-1.31773e-07,-7.2581e-09,0.942905,0.000464055,-1.53547e-07,1.81263e-09,0.943369,0.000463753,-1.48109e-07,7.58386e-12,0.943832,0.000463457,-1.48086e-07,-1.84298e-09,0.944296,0.000463155,-1.53615e-07,7.36433e-09,0.944759,0.00046287,-1.31522e-07,-2.76143e-08,0.945221,0.000462524,-2.14365e-07,4.34883e-08,0.945684,0.000462226,-8.39003e-08,-2.71297e-08,0.946146,0.000461977,-1.65289e-07,5.42595e-09,0.946608,0.000461662,-1.49012e-07,5.42593e-09,0.947069,0.000461381,-1.32734e-07,-2.71297e-08,0.94753,0.000461034,-2.14123e-07,4.34881e-08,0.947991,0.000460736,-8.36585e-08,-2.76134e-08,0.948452,0.000460486,-1.66499e-07,7.36083e-09,0.948912,0.000460175,-1.44416e-07,-1.82993e-09,0.949372,0.000459881,-1.49906e-07,-4.11073e-11,0.949832,0.000459581,-1.50029e-07,1.99434e-09,0.950291,0.000459287,-1.44046e-07,-7.93627e-09,0.950751,0.000458975,-1.67855e-07,2.97507e-08,0.951209,0.000458728,-7.86029e-08,-5.1462e-08,0.951668,0.000458417,-2.32989e-07,5.6888e-08,0.952126,0.000458121,-6.2325e-08,-5.68806e-08,0.952
584,0.000457826,-2.32967e-07,5.14251e-08,0.953042,0.000457514,-7.86914e-08,-2.96107e-08,0.953499,0.000457268,-1.67523e-07,7.41296e-09,0.953956,0.000456955,-1.45285e-07,-4.11262e-11,0.954413,0.000456665,-1.45408e-07,-7.24847e-09,0.95487,0.000456352,-1.67153e-07,2.9035e-08,0.955326,0.000456105,-8.00484e-08,-4.92869e-08,0.955782,0.000455797,-2.27909e-07,4.89032e-08,0.956238,0.000455488,-8.11994e-08,-2.71166e-08,0.956693,0.000455244,-1.62549e-07,-4.13678e-11,0.957148,0.000454919,-1.62673e-07,2.72821e-08,0.957603,0.000454675,-8.0827e-08,-4.94824e-08,0.958057,0.000454365,-2.29274e-07,5.14382e-08,0.958512,0.000454061,-7.49597e-08,-3.7061e-08,0.958965,0.0004538,-1.86143e-07,3.72013e-08,0.959419,0.000453539,-7.45389e-08,-5.21396e-08,0.959873,0.000453234,-2.30958e-07,5.21476e-08,0.960326,0.000452928,-7.45146e-08,-3.72416e-08,0.960778,0.000452667,-1.8624e-07,3.72143e-08,0.961231,0.000452407,-7.45967e-08,-5.20109e-08,0.961683,0.000452101,-2.30629e-07,5.16199e-08,0.962135,0.000451795,-7.57696e-08,-3.52595e-08,0.962587,0.000451538,-1.81548e-07,2.98133e-08,0.963038,0.000451264,-9.2108e-08,-2.43892e-08,0.963489,0.000451007,-1.65276e-07,8.13892e-09,0.96394,0.000450701,-1.40859e-07,-8.16647e-09,0.964391,0.000450394,-1.65358e-07,2.45269e-08,0.964841,0.000450137,-9.17775e-08,-3.03367e-08,0.965291,0.000449863,-1.82787e-07,3.7215e-08,0.965741,0.000449609,-7.11424e-08,-5.89188e-08,0.96619,0.00044929,-2.47899e-07,7.92509e-08,0.966639,0.000449032,-1.01462e-08,-7.92707e-08,0.967088,0.000448773,-2.47958e-07,5.90181e-08,0.967537,0.000448455,-7.0904e-08,-3.75925e-08,0.967985,0.0004482,-1.83681e-07,3.17471e-08,0.968433,0.000447928,-8.84401e-08,-2.97913e-08,0.968881,0.000447662,-1.77814e-07,2.78133e-08,0.969329,0.000447389,-9.4374e-08,-2.18572e-08,0.969776,0.000447135,-1.59946e-07,1.10134e-11,0.970223,0.000446815,-1.59913e-07,2.18132e-08,0.97067,0.000446561,-9.44732e-08,-2.76591e-08,0.971116,0.000446289,-1.7745e-07,2.92185e-08,0.971562,0.000446022,-8.97948e-08,-2.96104e-08,0.972008,0.000445753,-1
.78626e-07,2.96185e-08,0.972454,0.000445485,-8.97706e-08,-2.92588e-08,0.972899,0.000445218,-1.77547e-07,2.78123e-08,0.973344,0.000444946,-9.41103e-08,-2.23856e-08,0.973789,0.000444691,-1.61267e-07,2.12559e-09,0.974233,0.000444374,-1.5489e-07,1.38833e-08,0.974678,0.000444106,-1.13241e-07,1.94591e-09,0.975122,0.000443886,-1.07403e-07,-2.16669e-08,0.975565,0.000443606,-1.72404e-07,2.5117e-08,0.976009,0.000443336,-9.70526e-08,-1.91963e-08,0.976452,0.000443085,-1.54642e-07,-7.93627e-09,0.976895,0.000442752,-1.7845e-07,5.09414e-08,0.977338,0.000442548,-2.56262e-08,-7.66201e-08,0.97778,0.000442266,-2.55486e-07,7.67249e-08,0.978222,0.000441986,-2.53118e-08,-5.14655e-08,0.978664,0.000441781,-1.79708e-07,9.92773e-09,0.979106,0.000441451,-1.49925e-07,1.17546e-08,0.979547,0.000441186,-1.14661e-07,2.65868e-09,0.979988,0.000440965,-1.06685e-07,-2.23893e-08,0.980429,0.000440684,-1.73853e-07,2.72939e-08,0.980869,0.000440419,-9.19716e-08,-2.71816e-08,0.98131,0.000440153,-1.73516e-07,2.18278e-08,0.98175,0.000439872,-1.08033e-07,-5.24833e-10,0.982189,0.000439654,-1.09607e-07,-1.97284e-08,0.982629,0.000439376,-1.68793e-07,1.98339e-08,0.983068,0.000439097,-1.09291e-07,-2.62901e-12,0.983507,0.000438879,-1.09299e-07,-1.98234e-08,0.983946,0.000438601,-1.68769e-07,1.96916e-08,0.984384,0.000438322,-1.09694e-07,6.6157e-10,0.984823,0.000438105,-1.0771e-07,-2.23379e-08,0.985261,0.000437823,-1.74723e-07,2.90855e-08,0.985698,0.00043756,-8.74669e-08,-3.43992e-08,0.986136,0.000437282,-1.90665e-07,4.89068e-08,0.986573,0.000437048,-4.39442e-08,-4.20188e-08,0.98701,0.000436834,-1.7e-07,-4.11073e-11,0.987446,0.000436494,-1.70124e-07,4.21832e-08,0.987883,0.00043628,-4.35742e-08,-4.94824e-08,0.988319,0.000436044,-1.92021e-07,3.6537e-08,0.988755,0.00043577,-8.24102e-08,-3.70611e-08,0.989191,0.000435494,-1.93593e-07,5.21026e-08,0.989626,0.000435263,-3.72855e-08,-5.21402e-08,0.990061,0.000435032,-1.93706e-07,3.7249e-08,0.990496,0.000434756,-8.19592e-08,-3.72512e-08,0.990931,0.000434481,-1.93713e-07,5.21511e
-08,0.991365,0.00043425,-3.72595e-08,-5.21439e-08,0.991799,0.000434019,-1.93691e-07,3.72152e-08,0.992233,0.000433743,-8.20456e-08,-3.71123e-08,0.992667,0.000433468,-1.93382e-07,5.16292e-08,0.9931,0.000433236,-3.84947e-08,-5.01953e-08,0.993533,0.000433008,-1.89081e-07,2.99427e-08,0.993966,0.00043272,-9.92525e-08,-9.9708e-09,0.994399,0.000432491,-1.29165e-07,9.94051e-09,0.994831,0.000432263,-9.93434e-08,-2.97912e-08,0.995263,0.000431975,-1.88717e-07,4.96198e-08,0.995695,0.000431746,-3.98578e-08,-4.94785e-08,0.996127,0.000431518,-1.88293e-07,2.9085e-08,0.996558,0.000431229,-1.01038e-07,-7.25675e-09,0.996989,0.000431005,-1.22809e-07,-5.79945e-11,0.99742,0.000430759,-1.22983e-07,7.48873e-09,0.997851,0.000430536,-1.00516e-07,-2.98969e-08,0.998281,0.000430245,-1.90207e-07,5.24942e-08,0.998711,0.000430022,-3.27246e-08,-6.08706e-08,0.999141,0.000429774,-2.15336e-07,7.17788e-08,0.999571,0.000429392,0.,0.};
+
+        // Convert one CIE L*a*b* pixel (float) back to (s)RGB.
+        // srgb    : apply the inverse sRGB gamma curve after the linear transform.
+        // blueIdx : output channel index holding blue (0 -> BGR order, else RGB).
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void Lab2RGBConvert_32F(const T& src, D& dst)
+        {
+            // Cut-over points between the linear and cubic branches of the
+            // lightness inversion (same constants in both branches below).
+            const float lThresh = 0.008856f * 903.3f;
+            const float fThresh = 7.787f * 0.008856f + 16.0f / 116.0f;
+
+            // Recover Y and the intermediate f(Y) from the L channel.
+            float fy;
+            float Y;
+            if (src.x <= lThresh)
+            {
+                Y  = src.x / 903.3f;
+                fy = 7.787f * Y + 16.0f / 116.0f;
+            }
+            else
+            {
+                fy = (src.x + 16.0f) / 116.0f;
+                Y  = fy * fy * fy;
+            }
+
+            // a and b offset f(Y) to give f(X) and f(Z).
+            float X = src.y / 500.0f + fy;
+            float Z = fy - src.z / 200.0f;
+
+            // Invert f() on each: linear below the threshold, cubic above.
+            X = (X <= fThresh) ? (X - 16.0f / 116.0f) / 7.787f : X * X * X;
+            Z = (Z <= fThresh) ? (Z - 16.0f / 116.0f) / 7.787f : Z * Z * Z;
+
+            // XYZ -> linear RGB.
+            float B =  0.052891f * X - 0.204043f * Y + 1.151152f * Z;
+            float G = -0.921235f * X + 1.875991f * Y + 0.045244f * Z;
+            float R =  3.079933f * X - 1.537150f * Y - 0.542782f * Z;
+
+            if (srgb)
+            {
+                // Spline lookup into the inverse-gamma table.
+                B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+                R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+            }
+
+            // Write channels in the order requested by blueIdx.
+            dst.x = blueIdx == 0 ? B : R;
+            dst.y = G;
+            dst.z = blueIdx == 0 ? R : B;
+            setAlpha(dst, ColorChannel<float>::max());
+        }
+
+        // 8-bit variant: rescale L from [0,255] to [0,100] and shift a/b by
+        // -128, run the float conversion, then saturate back to uchar.
+        template <bool srgb, int blueIdx, typename T, typename D>
+        __device__ __forceinline__ void Lab2RGBConvert_8U(const T& src, D& dst)
+        {
+            float3 labf;
+            labf.x = src.x * (100.f / 255.f);
+            labf.y = src.y - 128;
+            labf.z = src.z - 128;
+
+            float3 rgbf;
+            Lab2RGBConvert_32F<srgb, blueIdx>(labf, rgbf);
+
+            dst.x = saturate_cast<uchar>(rgbf.x * 255.f);
+            dst.y = saturate_cast<uchar>(rgbf.y * 255.f);
+            dst.z = saturate_cast<uchar>(rgbf.z * 255.f);
+            setAlpha(dst, ColorChannel<uchar>::max());
+        }
+
+        // Functor wrapper around the conversion helpers; primary template is
+        // only declared, with one specialization per source depth.
+        template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct Lab2RGB;
+
+        // uchar specialization: delegates to the 8-bit conversion path.
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Lab2RGB<uchar, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+            {
+                typename TypeVec<uchar, dcn>::vec_type pix;
+                Lab2RGBConvert_8U<srgb, blueIdx>(src, pix);
+                return pix;
+            }
+
+            __host__ __device__ __forceinline__ Lab2RGB() {}
+            __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}
+        };
+
+        // float specialization: delegates to the 32-bit float conversion path.
+        template <int scn, int dcn, bool srgb, int blueIdx>
+        struct Lab2RGB<float, scn, dcn, srgb, blueIdx>
+            : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+        {
+            __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+            {
+                typename TypeVec<float, dcn>::vec_type pix;
+                Lab2RGBConvert_32F<srgb, blueIdx>(src, pix);
+                return pix;
+            }
+
+            __host__ __device__ __forceinline__ Lab2RGB() {}
+            __host__ __device__ __forceinline__ Lab2RGB(const Lab2RGB&) {}
+        };
+ }
+
+// Expands to a "name ## _traits" class whose create_functor() hands the
+// kernel-dispatch code a Lab2RGB functor instantiated with the given
+// channel counts (scn/dcn), sRGB flag, and blue-channel index.
+#define OPENCV_GPU_IMPLEMENT_Lab2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::Lab2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+///////////////////////////////////// RGB <-> Luv /////////////////////////////////////
+
+ namespace color_detail
+ {
+ __constant__ float c_LabCbrtTab[] = {0.137931,0.0114066,0.,1.18859e-07,0.149338,0.011407,3.56578e-07,-5.79396e-07,0.160745,0.0114059,-1.38161e-06,2.16892e-06,0.172151,0.0114097,5.12516e-06,-8.0814e-06,0.183558,0.0113957,-1.9119e-05,3.01567e-05,0.194965,0.0114479,7.13509e-05,-0.000112545,0.206371,0.011253,-0.000266285,-0.000106493,0.217252,0.0104009,-0.000585765,7.32149e-05,0.22714,0.00944906,-0.00036612,1.21917e-05,0.236235,0.0087534,-0.000329545,2.01753e-05,0.244679,0.00815483,-0.000269019,1.24435e-05,0.252577,0.00765412,-0.000231689,1.05618e-05,0.26001,0.00722243,-0.000200003,8.26662e-06,0.267041,0.00684723,-0.000175203,6.76746e-06,0.27372,0.00651712,-0.000154901,5.61192e-06,0.280088,0.00622416,-0.000138065,4.67009e-06,0.286179,0.00596204,-0.000124055,3.99012e-06,0.292021,0.0057259,-0.000112085,3.36032e-06,0.297638,0.00551181,-0.000102004,2.95338e-06,0.30305,0.00531666,-9.31435e-05,2.52875e-06,0.308277,0.00513796,-8.55572e-05,2.22022e-06,0.313331,0.00497351,-7.88966e-05,1.97163e-06,0.318228,0.00482163,-7.29817e-05,1.7248e-06,0.322978,0.00468084,-6.78073e-05,1.55998e-06,0.327593,0.0045499,-6.31274e-05,1.36343e-06,0.332081,0.00442774,-5.90371e-05,1.27136e-06,0.336451,0.00431348,-5.5223e-05,1.09111e-06,0.34071,0.00420631,-5.19496e-05,1.0399e-06,0.344866,0.00410553,-4.88299e-05,9.18347e-07,0.348923,0.00401062,-4.60749e-05,8.29942e-07,0.352889,0.00392096,-4.35851e-05,7.98478e-07,0.356767,0.00383619,-4.11896e-05,6.84917e-07,0.360562,0.00375586,-3.91349e-05,6.63976e-07,0.36428,0.00367959,-3.7143e-05,5.93086e-07,0.367923,0.00360708,-3.53637e-05,5.6976e-07,0.371495,0.00353806,-3.36544e-05,4.95533e-07,0.375,0.00347224,-3.21678e-05,4.87951e-07,0.378441,0.00340937,-3.0704e-05,4.4349e-07,0.38182,0.00334929,-2.93735e-05,4.20297e-07,0.38514,0.0032918,-2.81126e-05,3.7872e-07,0.388404,0.00323671,-2.69764e-05,3.596e-07,0.391614,0.00318384,-2.58976e-05,3.5845e-07,0.394772,0.00313312,-2.48223e-05,2.92765e-07,0.397881,0.00308435,-2.3944e-05,3.18232e-07,0.400942,0.00303742,-2.29893e-
05,2.82046e-07,0.403957,0.00299229,-2.21432e-05,2.52315e-07,0.406927,0.00294876,-2.13862e-05,2.58416e-07,0.409855,0.00290676,-2.0611e-05,2.33939e-07,0.412741,0.00286624,-1.99092e-05,2.36342e-07,0.415587,0.00282713,-1.92001e-05,1.916e-07,0.418396,0.00278931,-1.86253e-05,2.1915e-07,0.421167,0.00275271,-1.79679e-05,1.83498e-07,0.423901,0.00271733,-1.74174e-05,1.79343e-07,0.426602,0.00268303,-1.68794e-05,1.72013e-07,0.429268,0.00264979,-1.63633e-05,1.75686e-07,0.431901,0.00261759,-1.58363e-05,1.3852e-07,0.434503,0.00258633,-1.54207e-05,1.64304e-07,0.437074,0.00255598,-1.49278e-05,1.28136e-07,0.439616,0.00252651,-1.45434e-05,1.57618e-07,0.442128,0.0024979,-1.40705e-05,1.0566e-07,0.444612,0.00247007,-1.37535e-05,1.34998e-07,0.447068,0.00244297,-1.33485e-05,1.29207e-07,0.449498,0.00241666,-1.29609e-05,9.32347e-08,0.451902,0.00239102,-1.26812e-05,1.23703e-07,0.45428,0.00236603,-1.23101e-05,9.74072e-08,0.456634,0.0023417,-1.20179e-05,1.12518e-07,0.458964,0.002318,-1.16803e-05,7.83681e-08,0.46127,0.00229488,-1.14452e-05,1.10452e-07,0.463554,0.00227232,-1.11139e-05,7.58719e-08,0.465815,0.00225032,-1.08863e-05,9.2699e-08,0.468055,0.00222882,-1.06082e-05,8.97738e-08,0.470273,0.00220788,-1.03388e-05,5.4845e-08,0.47247,0.00218736,-1.01743e-05,1.0808e-07,0.474648,0.00216734,-9.85007e-06,4.9277e-08,0.476805,0.00214779,-9.70224e-06,8.22408e-08,0.478943,0.00212863,-9.45551e-06,6.87942e-08,0.481063,0.00210993,-9.24913e-06,5.98144e-08,0.483163,0.00209161,-9.06969e-06,7.93789e-08,0.485246,0.00207371,-8.83155e-06,3.99032e-08,0.487311,0.00205616,-8.71184e-06,8.88325e-08,0.489358,0.002039,-8.44534e-06,2.20004e-08,0.491389,0.00202218,-8.37934e-06,9.13872e-08,0.493403,0.0020057,-8.10518e-06,2.96829e-08,0.495401,0.00198957,-8.01613e-06,5.81028e-08,0.497382,0.00197372,-7.84183e-06,6.5731e-08,0.499348,0.00195823,-7.64463e-06,3.66019e-08,0.501299,0.00194305,-7.53483e-06,2.62811e-08,0.503234,0.00192806,-7.45598e-06,9.66907e-08,0.505155,0.00191344,-7.16591e-06,4.18928e-09,0.507061,0.00189912,-7.153
34e-06,6.53665e-08,0.508953,0.00188501,-6.95724e-06,3.23686e-08,0.510831,0.00187119,-6.86014e-06,4.35774e-08,0.512696,0.0018576,-6.72941e-06,3.17406e-08,0.514547,0.00184424,-6.63418e-06,6.78785e-08,0.516384,0.00183117,-6.43055e-06,-5.23126e-09,0.518209,0.0018183,-6.44624e-06,7.22562e-08,0.520021,0.00180562,-6.22947e-06,1.42292e-08,0.52182,0.0017932,-6.18679e-06,4.9641e-08,0.523607,0.00178098,-6.03786e-06,2.56259e-08,0.525382,0.00176898,-5.96099e-06,2.66696e-08,0.527145,0.00175714,-5.88098e-06,4.65094e-08,0.528897,0.00174552,-5.74145e-06,2.57114e-08,0.530637,0.00173411,-5.66431e-06,2.94588e-08,0.532365,0.00172287,-5.57594e-06,3.52667e-08,0.534082,0.00171182,-5.47014e-06,8.28868e-09,0.535789,0.00170091,-5.44527e-06,5.07871e-08,0.537484,0.00169017,-5.29291e-06,2.69817e-08,0.539169,0.00167967,-5.21197e-06,2.01009e-08,0.540844,0.0016693,-5.15166e-06,1.18237e-08,0.542508,0.00165903,-5.11619e-06,5.18135e-08,0.544162,0.00164896,-4.96075e-06,1.9341e-08,0.545806,0.00163909,-4.90273e-06,-9.96867e-09,0.54744,0.00162926,-4.93263e-06,8.01382e-08,0.549064,0.00161963,-4.69222e-06,-1.25601e-08,0.550679,0.00161021,-4.7299e-06,2.97067e-08,0.552285,0.00160084,-4.64078e-06,1.29426e-08,0.553881,0.0015916,-4.60195e-06,3.77327e-08,0.555468,0.00158251,-4.48875e-06,1.49412e-08,0.557046,0.00157357,-4.44393e-06,2.17118e-08,0.558615,0.00156475,-4.3788e-06,1.74206e-08,0.560176,0.00155605,-4.32653e-06,2.78152e-08,0.561727,0.00154748,-4.24309e-06,-9.47239e-09,0.563271,0.00153896,-4.27151e-06,6.9679e-08,0.564805,0.00153063,-4.06247e-06,-3.08246e-08,0.566332,0.00152241,-4.15494e-06,5.36188e-08,0.56785,0.00151426,-3.99409e-06,-4.83594e-09,0.56936,0.00150626,-4.00859e-06,2.53293e-08,0.570863,0.00149832,-3.93261e-06,2.27286e-08,0.572357,0.00149052,-3.86442e-06,2.96541e-09,0.573844,0.0014828,-3.85552e-06,2.50147e-08,0.575323,0.00147516,-3.78048e-06,1.61842e-08,0.576794,0.00146765,-3.73193e-06,2.94582e-08,0.578258,0.00146028,-3.64355e-06,-1.48076e-08,0.579715,0.00145295,-3.68798e-06,2.97724e-08,0.581164,
0.00144566,-3.59866e-06,1.49272e-08,0.582606,0.00143851,-3.55388e-06,2.97285e-08,0.584041,0.00143149,-3.46469e-06,-1.46323e-08,0.585469,0.00142451,-3.50859e-06,2.88004e-08,0.58689,0.00141758,-3.42219e-06,1.864e-08,0.588304,0.00141079,-3.36627e-06,1.58482e-08,0.589712,0.00140411,-3.31872e-06,-2.24279e-08,0.591112,0.00139741,-3.38601e-06,7.38639e-08,0.592507,0.00139085,-3.16441e-06,-3.46088e-08,0.593894,0.00138442,-3.26824e-06,4.96675e-09,0.595275,0.0013779,-3.25334e-06,7.4346e-08,0.59665,0.00137162,-3.0303e-06,-6.39319e-08,0.598019,0.00136536,-3.2221e-06,6.21725e-08,0.599381,0.00135911,-3.03558e-06,-5.94423e-09,0.600737,0.00135302,-3.05341e-06,2.12091e-08,0.602087,0.00134697,-2.98979e-06,-1.92876e-08,0.603431,0.00134094,-3.04765e-06,5.5941e-08,0.604769,0.00133501,-2.87983e-06,-2.56622e-08,0.606101,0.00132917,-2.95681e-06,4.67078e-08,0.607427,0.0013234,-2.81669e-06,-4.19592e-08,0.608748,0.00131764,-2.94257e-06,6.15243e-08,0.610062,0.00131194,-2.75799e-06,-2.53244e-08,0.611372,0.00130635,-2.83397e-06,3.97739e-08,0.612675,0.0013008,-2.71465e-06,-1.45618e-08,0.613973,0.00129533,-2.75833e-06,1.84733e-08,0.615266,0.00128986,-2.70291e-06,2.73606e-10,0.616553,0.00128446,-2.70209e-06,4.00367e-08,0.617835,0.00127918,-2.58198e-06,-4.12113e-08,0.619111,0.00127389,-2.70561e-06,6.52039e-08,0.620383,0.00126867,-2.51e-06,-4.07901e-08,0.621649,0.00126353,-2.63237e-06,3.83516e-08,0.62291,0.00125838,-2.51732e-06,6.59315e-09,0.624166,0.00125337,-2.49754e-06,-5.11939e-09,0.625416,0.00124836,-2.5129e-06,1.38846e-08,0.626662,0.00124337,-2.47124e-06,9.18514e-09,0.627903,0.00123846,-2.44369e-06,8.97952e-09,0.629139,0.0012336,-2.41675e-06,1.45012e-08,0.63037,0.00122881,-2.37325e-06,-7.37949e-09,0.631597,0.00122404,-2.39538e-06,1.50169e-08,0.632818,0.00121929,-2.35033e-06,6.91648e-09,0.634035,0.00121461,-2.32958e-06,1.69219e-08,0.635248,0.00121,-2.27882e-06,-1.49997e-08,0.636455,0.0012054,-2.32382e-06,4.30769e-08,0.637659,0.00120088,-2.19459e-06,-3.80986e-08,0.638857,0.00119638,-2.30888e-06,4.
97134e-08,0.640051,0.00119191,-2.15974e-06,-4.15463e-08,0.641241,0.00118747,-2.28438e-06,5.68667e-08,0.642426,0.00118307,-2.11378e-06,-7.10641e-09,0.643607,0.00117882,-2.1351e-06,-2.8441e-08,0.644784,0.00117446,-2.22042e-06,6.12658e-08,0.645956,0.00117021,-2.03663e-06,-3.78083e-08,0.647124,0.00116602,-2.15005e-06,3.03627e-08,0.648288,0.00116181,-2.05896e-06,-2.40379e-08,0.649448,0.00115762,-2.13108e-06,6.57887e-08,0.650603,0.00115356,-1.93371e-06,-6.03028e-08,0.651755,0.00114951,-2.11462e-06,5.62134e-08,0.652902,0.00114545,-1.94598e-06,-4.53417e-08,0.654046,0.00114142,-2.082e-06,6.55489e-08,0.655185,0.00113745,-1.88536e-06,-3.80396e-08,0.656321,0.00113357,-1.99948e-06,2.70049e-08,0.657452,0.00112965,-1.91846e-06,-1.03755e-08,0.65858,0.00112578,-1.94959e-06,1.44973e-08,0.659704,0.00112192,-1.9061e-06,1.1991e-08,0.660824,0.00111815,-1.87012e-06,-2.85634e-09,0.66194,0.0011144,-1.87869e-06,-5.65782e-10,0.663053,0.00111064,-1.88039e-06,5.11947e-09,0.664162,0.0011069,-1.86503e-06,3.96924e-08,0.665267,0.00110328,-1.74595e-06,-4.46795e-08,0.666368,0.00109966,-1.87999e-06,1.98161e-08,0.667466,0.00109596,-1.82054e-06,2.502e-08,0.66856,0.00109239,-1.74548e-06,-6.86593e-10,0.669651,0.0010889,-1.74754e-06,-2.22739e-08,0.670738,0.00108534,-1.81437e-06,3.01776e-08,0.671821,0.0010818,-1.72383e-06,2.07732e-08,0.672902,0.00107841,-1.66151e-06,-5.36658e-08,0.673978,0.00107493,-1.82251e-06,7.46802e-08,0.675051,0.00107151,-1.59847e-06,-6.62411e-08,0.676121,0.00106811,-1.79719e-06,7.10748e-08,0.677188,0.00106473,-1.58397e-06,-3.92441e-08,0.678251,0.00106145,-1.7017e-06,2.62973e-08,0.679311,0.00105812,-1.62281e-06,-6.34035e-09,0.680367,0.00105486,-1.64183e-06,-9.36249e-10,0.68142,0.00105157,-1.64464e-06,1.00854e-08,0.68247,0.00104831,-1.61438e-06,2.01995e-08,0.683517,0.00104514,-1.55378e-06,-3.1279e-08,0.68456,0.00104194,-1.64762e-06,4.53114e-08,0.685601,0.00103878,-1.51169e-06,-3.07573e-08,0.686638,0.00103567,-1.60396e-06,1.81133e-08,0.687672,0.00103251,-1.54962e-06,1.79085e-08,0.688703,
0.00102947,-1.49589e-06,-3.01428e-08,0.689731,0.00102639,-1.58632e-06,4.30583e-08,0.690756,0.00102334,-1.45715e-06,-2.28814e-08,0.691778,0.00102036,-1.52579e-06,-1.11373e-08,0.692797,0.00101727,-1.5592e-06,6.74305e-08,0.693812,0.00101436,-1.35691e-06,-7.97709e-08,0.694825,0.0010114,-1.59622e-06,7.28391e-08,0.695835,0.00100843,-1.37771e-06,-3.27715e-08,0.696842,0.00100558,-1.47602e-06,-1.35807e-09,0.697846,0.00100262,-1.48009e-06,3.82037e-08,0.698847,0.000999775,-1.36548e-06,-3.22474e-08,0.699846,0.000996948,-1.46223e-06,3.11809e-08,0.700841,0.000994117,-1.36868e-06,-3.28714e-08,0.701834,0.000991281,-1.4673e-06,4.07001e-08,0.702824,0.000988468,-1.3452e-06,-1.07197e-08,0.703811,0.000985746,-1.37736e-06,2.17866e-09,0.704795,0.000982998,-1.37082e-06,2.00521e-09,0.705777,0.000980262,-1.3648e-06,-1.01996e-08,0.706756,0.000977502,-1.3954e-06,3.87931e-08,0.707732,0.000974827,-1.27902e-06,-2.57632e-08,0.708706,0.000972192,-1.35631e-06,4.65513e-09,0.709676,0.000969493,-1.34235e-06,7.14257e-09,0.710645,0.00096683,-1.32092e-06,2.63791e-08,0.71161,0.000964267,-1.24178e-06,-5.30543e-08,0.712573,0.000961625,-1.40095e-06,6.66289e-08,0.713533,0.000959023,-1.20106e-06,-3.46474e-08,0.714491,0.000956517,-1.305e-06,1.23559e-08,0.715446,0.000953944,-1.26793e-06,-1.47763e-08,0.716399,0.000951364,-1.31226e-06,4.67494e-08,0.717349,0.000948879,-1.17201e-06,-5.3012e-08,0.718297,0.000946376,-1.33105e-06,4.60894e-08,0.719242,0.000943852,-1.19278e-06,-1.21366e-08,0.720185,0.00094143,-1.22919e-06,2.45673e-09,0.721125,0.000938979,-1.22182e-06,2.30966e-09,0.722063,0.000936543,-1.21489e-06,-1.16954e-08,0.722998,0.000934078,-1.24998e-06,4.44718e-08,0.723931,0.000931711,-1.11656e-06,-4.69823e-08,0.724861,0.000929337,-1.25751e-06,2.4248e-08,0.725789,0.000926895,-1.18477e-06,9.5949e-09,0.726715,0.000924554,-1.15598e-06,-3.02286e-09,0.727638,0.000922233,-1.16505e-06,2.49649e-09,0.72856,0.00091991,-1.15756e-06,-6.96321e-09,0.729478,0.000917575,-1.17845e-06,2.53564e-08,0.730395,0.000915294,-1.10238e-06,-3.
48578e-08,0.731309,0.000912984,-1.20695e-06,5.44704e-08,0.732221,0.000910734,-1.04354e-06,-6.38144e-08,0.73313,0.000908455,-1.23499e-06,8.15781e-08,0.734038,0.00090623,-9.90253e-07,-8.3684e-08,0.734943,0.000903999,-1.2413e-06,7.43441e-08,0.735846,0.000901739,-1.01827e-06,-3.48787e-08,0.736746,0.000899598,-1.12291e-06,5.56596e-09,0.737645,0.000897369,-1.10621e-06,1.26148e-08,0.738541,0.000895194,-1.06837e-06,3.57935e-09,0.739435,0.000893068,-1.05763e-06,-2.69322e-08,0.740327,0.000890872,-1.13842e-06,4.45448e-08,0.741217,0.000888729,-1.00479e-06,-3.20376e-08,0.742105,0.000886623,-1.1009e-06,2.40011e-08,0.74299,0.000884493,-1.0289e-06,-4.36209e-09,0.743874,0.000882422,-1.04199e-06,-6.55268e-09,0.744755,0.000880319,-1.06164e-06,3.05728e-08,0.745634,0.000878287,-9.69926e-07,-5.61338e-08,0.746512,0.000876179,-1.13833e-06,7.4753e-08,0.747387,0.000874127,-9.14068e-07,-6.40644e-08,0.74826,0.000872106,-1.10626e-06,6.22955e-08,0.749131,0.000870081,-9.19375e-07,-6.59083e-08,0.75,0.000868044,-1.1171e-06,8.21284e-08,0.750867,0.000866056,-8.70714e-07,-8.37915e-08,0.751732,0.000864064,-1.12209e-06,7.42237e-08,0.752595,0.000862042,-8.99418e-07,-3.42894e-08,0.753456,0.00086014,-1.00229e-06,3.32955e-09,0.754315,0.000858146,-9.92297e-07,2.09712e-08,0.755173,0.000856224,-9.29384e-07,-2.76096e-08,0.756028,0.000854282,-1.01221e-06,2.98627e-08,0.756881,0.000852348,-9.22625e-07,-3.22365e-08,0.757733,0.000850406,-1.01933e-06,3.94786e-08,0.758582,0.000848485,-9.00898e-07,-6.46833e-09,0.75943,0.000846664,-9.20303e-07,-1.36052e-08,0.760275,0.000844783,-9.61119e-07,1.28447e-09,0.761119,0.000842864,-9.57266e-07,8.4674e-09,0.761961,0.000840975,-9.31864e-07,2.44506e-08,0.762801,0.000839185,-8.58512e-07,-4.6665e-08,0.763639,0.000837328,-9.98507e-07,4.30001e-08,0.764476,0.00083546,-8.69507e-07,-6.12609e-09,0.76531,0.000833703,-8.87885e-07,-1.84959e-08,0.766143,0.000831871,-9.43372e-07,2.05052e-08,0.766974,0.000830046,-8.81857e-07,-3.92026e-09,0.767803,0.000828271,-8.93618e-07,-4.82426e-09,0.768631,0.
000826469,-9.0809e-07,2.32172e-08,0.769456,0.000824722,-8.38439e-07,-2.84401e-08,0.77028,0.00082296,-9.23759e-07,3.09386e-08,0.771102,0.000821205,-8.30943e-07,-3.57099e-08,0.771922,0.000819436,-9.38073e-07,5.22963e-08,0.772741,0.000817717,-7.81184e-07,-5.42658e-08,0.773558,0.000815992,-9.43981e-07,4.55579e-08,0.774373,0.000814241,-8.07308e-07,-8.75656e-09,0.775186,0.0008126,-8.33578e-07,-1.05315e-08,0.775998,0.000810901,-8.65172e-07,-8.72188e-09,0.776808,0.000809145,-8.91338e-07,4.54191e-08,0.777616,0.000807498,-7.5508e-07,-5.37454e-08,0.778423,0.000805827,-9.16317e-07,5.03532e-08,0.779228,0.000804145,-7.65257e-07,-2.84584e-08,0.780031,0.000802529,-8.50632e-07,3.87579e-09,0.780833,0.00080084,-8.39005e-07,1.29552e-08,0.781633,0.0007992,-8.00139e-07,3.90804e-09,0.782432,0.000797612,-7.88415e-07,-2.85874e-08,0.783228,0.000795949,-8.74177e-07,5.0837e-08,0.784023,0.000794353,-7.21666e-07,-5.55513e-08,0.784817,0.000792743,-8.8832e-07,5.21587e-08,0.785609,0.000791123,-7.31844e-07,-3.38744e-08,0.786399,0.000789558,-8.33467e-07,2.37342e-08,0.787188,0.000787962,-7.62264e-07,-1.45775e-09,0.787975,0.000786433,-7.66638e-07,-1.79034e-08,0.788761,0.000784846,-8.20348e-07,1.34665e-08,0.789545,0.000783246,-7.79948e-07,2.3642e-08,0.790327,0.000781757,-7.09022e-07,-4.84297e-08,0.791108,0.000780194,-8.54311e-07,5.08674e-08,0.791888,0.000778638,-7.01709e-07,-3.58303e-08,0.792666,0.000777127,-8.092e-07,3.28493e-08,0.793442,0.000775607,-7.10652e-07,-3.59624e-08,0.794217,0.000774078,-8.1854e-07,5.13959e-08,0.79499,0.000772595,-6.64352e-07,-5.04121e-08,0.795762,0.000771115,-8.15588e-07,3.10431e-08,0.796532,0.000769577,-7.22459e-07,-1.41557e-08,0.797301,0.00076809,-7.64926e-07,2.55795e-08,0.798069,0.000766636,-6.88187e-07,-2.85578e-08,0.798835,0.000765174,-7.73861e-07,2.90472e-08,0.799599,0.000763714,-6.86719e-07,-2.80262e-08,0.800362,0.000762256,-7.70798e-07,2.34531e-08,0.801123,0.000760785,-7.00438e-07,-6.18144e-09,0.801884,0.000759366,-7.18983e-07,1.27263e-09,0.802642,0.000757931,-7.15165
e-07,1.09101e-09,0.803399,0.000756504,-7.11892e-07,-5.63675e-09,0.804155,0.000755064,-7.28802e-07,2.14559e-08,0.80491,0.00075367,-6.64434e-07,-2.05821e-08,0.805663,0.00075228,-7.26181e-07,1.26812e-09,0.806414,0.000750831,-7.22377e-07,1.55097e-08,0.807164,0.000749433,-6.75848e-07,-3.70216e-09,0.807913,0.00074807,-6.86954e-07,-7.0105e-10,0.80866,0.000746694,-6.89057e-07,6.5063e-09,0.809406,0.000745336,-6.69538e-07,-2.53242e-08,0.810151,0.000743921,-7.45511e-07,3.51858e-08,0.810894,0.000742535,-6.39953e-07,3.79034e-09,0.811636,0.000741267,-6.28582e-07,-5.03471e-08,0.812377,0.000739858,-7.79624e-07,7.83886e-08,0.813116,0.000738534,-5.44458e-07,-8.43935e-08,0.813854,0.000737192,-7.97638e-07,8.03714e-08,0.81459,0.000735838,-5.56524e-07,-5.82784e-08,0.815325,0.00073455,-7.31359e-07,3.35329e-08,0.816059,0.000733188,-6.3076e-07,-1.62486e-08,0.816792,0.000731878,-6.79506e-07,3.14614e-08,0.817523,0.000730613,-5.85122e-07,-4.99925e-08,0.818253,0.000729293,-7.35099e-07,4.92994e-08,0.818982,0.000727971,-5.87201e-07,-2.79959e-08,0.819709,0.000726712,-6.71189e-07,3.07959e-09,0.820435,0.000725379,-6.6195e-07,1.56777e-08,0.82116,0.000724102,-6.14917e-07,-6.18564e-09,0.821883,0.000722854,-6.33474e-07,9.06488e-09,0.822606,0.000721614,-6.06279e-07,-3.00739e-08,0.823327,0.000720311,-6.96501e-07,5.16262e-08,0.824046,0.000719073,-5.41623e-07,-5.72214e-08,0.824765,0.000717818,-7.13287e-07,5.80503e-08,0.825482,0.000716566,-5.39136e-07,-5.57703e-08,0.826198,0.00071532,-7.06447e-07,4.58215e-08,0.826912,0.000714045,-5.68983e-07,-8.30636e-09,0.827626,0.000712882,-5.93902e-07,-1.25961e-08,0.828338,0.000711656,-6.3169e-07,-9.13985e-10,0.829049,0.00071039,-6.34432e-07,1.62519e-08,0.829759,0.00070917,-5.85676e-07,-4.48904e-09,0.830468,0.000707985,-5.99143e-07,1.70418e-09,0.831175,0.000706792,-5.9403e-07,-2.32768e-09,0.831881,0.000705597,-6.01014e-07,7.60648e-09,0.832586,0.000704418,-5.78194e-07,-2.80982e-08,0.83329,0.000703177,-6.62489e-07,4.51817e-08,0.833993,0.000701988,-5.26944e-07,-3.34192e-08,0
.834694,0.000700834,-6.27201e-07,2.88904e-08,0.835394,0.000699666,-5.4053e-07,-2.25378e-08,0.836093,0.000698517,-6.08143e-07,1.65589e-09,0.836791,0.000697306,-6.03176e-07,1.59142e-08,0.837488,0.000696147,-5.55433e-07,-5.70801e-09,0.838184,0.000695019,-5.72557e-07,6.91792e-09,0.838878,0.000693895,-5.51803e-07,-2.19637e-08,0.839571,0.000692725,-6.17694e-07,2.13321e-08,0.840263,0.000691554,-5.53698e-07,-3.75996e-09,0.840954,0.000690435,-5.64978e-07,-6.29219e-09,0.841644,0.000689287,-5.83855e-07,2.89287e-08,0.842333,0.000688206,-4.97068e-07,-4.98181e-08,0.843021,0.000687062,-6.46523e-07,5.11344e-08,0.843707,0.000685922,-4.9312e-07,-3.55102e-08,0.844393,0.00068483,-5.9965e-07,3.13019e-08,0.845077,0.000683724,-5.05745e-07,-3.00925e-08,0.84576,0.000682622,-5.96022e-07,2.94636e-08,0.846442,0.000681519,-5.07631e-07,-2.81572e-08,0.847123,0.000680419,-5.92103e-07,2.35606e-08,0.847803,0.000679306,-5.21421e-07,-6.48045e-09,0.848482,0.000678243,-5.40863e-07,2.36124e-09,0.849159,0.000677169,-5.33779e-07,-2.96461e-09,0.849836,0.000676092,-5.42673e-07,9.49728e-09,0.850512,0.000675035,-5.14181e-07,-3.50245e-08,0.851186,0.000673902,-6.19254e-07,7.09959e-08,0.851859,0.000672876,-4.06267e-07,-7.01453e-08,0.852532,0.000671853,-6.16703e-07,3.07714e-08,0.853203,0.000670712,-5.24388e-07,6.66423e-09,0.853873,0.000669684,-5.04396e-07,2.17629e-09,0.854542,0.000668681,-4.97867e-07,-1.53693e-08,0.855211,0.000667639,-5.43975e-07,-3.03752e-10,0.855878,0.000666551,-5.44886e-07,1.65844e-08,0.856544,0.000665511,-4.95133e-07,-6.42907e-09,0.857209,0.000664501,-5.1442e-07,9.13195e-09,0.857873,0.0006635,-4.87024e-07,-3.00987e-08,0.858536,0.000662435,-5.7732e-07,5.16584e-08,0.859198,0.000661436,-4.22345e-07,-5.73255e-08,0.859859,0.000660419,-5.94322e-07,5.84343e-08,0.860518,0.000659406,-4.19019e-07,-5.72022e-08,0.861177,0.000658396,-5.90626e-07,5.11653e-08,0.861835,0.000657368,-4.3713e-07,-2.82495e-08,0.862492,0.000656409,-5.21878e-07,2.22788e-09,0.863148,0.000655372,-5.15195e-07,1.9338e-08,0.863803,0.000
6544,-4.5718e-07,-1.99754e-08,0.864457,0.000653425,-5.17107e-07,9.59024e-10,0.86511,0.000652394,-5.1423e-07,1.61393e-08,0.865762,0.000651414,-4.65812e-07,-5.91149e-09,0.866413,0.000650465,-4.83546e-07,7.50665e-09,0.867063,0.00064952,-4.61026e-07,-2.4115e-08,0.867712,0.000648526,-5.33371e-07,2.93486e-08,0.86836,0.000647547,-4.45325e-07,-3.36748e-08,0.869007,0.000646555,-5.4635e-07,4.57461e-08,0.869653,0.0006456,-4.09112e-07,-3.01002e-08,0.870298,0.000644691,-4.99412e-07,1.50501e-08,0.870942,0.000643738,-4.54262e-07,-3.01002e-08,0.871585,0.000642739,-5.44563e-07,4.57461e-08,0.872228,0.000641787,-4.07324e-07,-3.36748e-08,0.872869,0.000640871,-5.08349e-07,2.93486e-08,0.873509,0.000639943,-4.20303e-07,-2.4115e-08,0.874149,0.00063903,-4.92648e-07,7.50655e-09,0.874787,0.000638067,-4.70128e-07,-5.91126e-09,0.875425,0.000637109,-4.87862e-07,1.61385e-08,0.876062,0.000636182,-4.39447e-07,9.61961e-10,0.876697,0.000635306,-4.36561e-07,-1.99863e-08,0.877332,0.000634373,-4.9652e-07,1.93785e-08,0.877966,0.000633438,-4.38384e-07,2.07697e-09,0.878599,0.000632567,-4.32153e-07,-2.76864e-08,0.879231,0.00063162,-5.15212e-07,4.90641e-08,0.879862,0.000630737,-3.6802e-07,-4.93606e-08,0.880493,0.000629852,-5.16102e-07,2.9169e-08,0.881122,0.000628908,-4.28595e-07,-7.71083e-09,0.881751,0.000628027,-4.51727e-07,1.6744e-09,0.882378,0.000627129,-4.46704e-07,1.01317e-09,0.883005,0.000626239,-4.43665e-07,-5.72703e-09,0.883631,0.000625334,-4.60846e-07,2.1895e-08,0.884255,0.000624478,-3.95161e-07,-2.22481e-08,0.88488,0.000623621,-4.61905e-07,7.4928e-09,0.885503,0.00062272,-4.39427e-07,-7.72306e-09,0.886125,0.000621818,-4.62596e-07,2.33995e-08,0.886746,0.000620963,-3.92398e-07,-2.62704e-08,0.887367,0.000620099,-4.71209e-07,2.20775e-08,0.887987,0.000619223,-4.04976e-07,-2.43496e-09,0.888605,0.000618406,-4.12281e-07,-1.23377e-08,0.889223,0.000617544,-4.49294e-07,-7.81876e-09,0.88984,0.000616622,-4.72751e-07,4.36128e-08,0.890457,0.000615807,-3.41912e-07,-4.7423e-08,0.891072,0.000614981,-4.84181e-07,2.686
98e-08,0.891687,0.000614093,-4.03572e-07,-4.51384e-10,0.8923,0.000613285,-4.04926e-07,-2.50643e-08,0.892913,0.0006124,-4.80119e-07,4.11038e-08,0.893525,0.000611563,-3.56808e-07,-2.01414e-08,0.894136,0.000610789,-4.17232e-07,-2.01426e-08,0.894747,0.000609894,-4.7766e-07,4.11073e-08,0.895356,0.000609062,-3.54338e-07,-2.50773e-08,0.895965,0.000608278,-4.2957e-07,-4.02954e-10,0.896573,0.000607418,-4.30779e-07,2.66891e-08,0.89718,0.000606636,-3.50711e-07,-4.67489e-08,0.897786,0.000605795,-4.90958e-07,4.10972e-08,0.898391,0.000604936,-3.67666e-07,1.56948e-09,0.898996,0.000604205,-3.62958e-07,-4.73751e-08,0.8996,0.000603337,-5.05083e-07,6.87214e-08,0.900202,0.000602533,-2.98919e-07,-4.86966e-08,0.900805,0.000601789,-4.45009e-07,6.85589e-09,0.901406,0.00060092,-4.24441e-07,2.1273e-08,0.902007,0.000600135,-3.60622e-07,-3.23434e-08,0.902606,0.000599317,-4.57652e-07,4.84959e-08,0.903205,0.000598547,-3.12164e-07,-4.24309e-08,0.903803,0.000597795,-4.39457e-07,2.01844e-09,0.904401,0.000596922,-4.33402e-07,3.43571e-08,0.904997,0.000596159,-3.30331e-07,-2.02374e-08,0.905593,0.000595437,-3.91043e-07,-1.30123e-08,0.906188,0.000594616,-4.3008e-07,1.26819e-08,0.906782,0.000593794,-3.92034e-07,2.18894e-08,0.907376,0.000593076,-3.26366e-07,-4.06349e-08,0.907968,0.000592301,-4.4827e-07,2.1441e-08,0.90856,0.000591469,-3.83947e-07,1.44754e-08,0.909151,0.000590744,-3.40521e-07,-1.97379e-08,0.909742,0.000590004,-3.99735e-07,4.87161e-09,0.910331,0.000589219,-3.8512e-07,2.51532e-10,0.91092,0.00058845,-3.84366e-07,-5.87776e-09,0.911508,0.000587663,-4.01999e-07,2.32595e-08,0.912096,0.000586929,-3.3222e-07,-2.75554e-08,0.912682,0.000586182,-4.14887e-07,2.73573e-08,0.913268,0.000585434,-3.32815e-07,-2.22692e-08,0.913853,0.000584702,-3.99622e-07,2.11486e-09,0.914437,0.000583909,-3.93278e-07,1.38098e-08,0.915021,0.000583164,-3.51848e-07,2.25042e-09,0.915604,0.000582467,-3.45097e-07,-2.28115e-08,0.916186,0.000581708,-4.13531e-07,2.93911e-08,0.916767,0.000580969,-3.25358e-07,-3.51481e-08,0.917348,0.000
580213,-4.30803e-07,5.15967e-08,0.917928,0.000579506,-2.76012e-07,-5.20296e-08,0.918507,0.000578798,-4.32101e-07,3.73124e-08,0.919085,0.000578046,-3.20164e-07,-3.76154e-08,0.919663,0.000577293,-4.3301e-07,5.35447e-08,0.92024,0.000576587,-2.72376e-07,-5.7354e-08,0.920816,0.000575871,-4.44438e-07,5.66621e-08,0.921391,0.000575152,-2.74452e-07,-5.00851e-08,0.921966,0.000574453,-4.24707e-07,2.4469e-08,0.92254,0.000573677,-3.513e-07,1.18138e-08,0.923114,0.000573009,-3.15859e-07,-1.21195e-08,0.923686,0.000572341,-3.52217e-07,-2.29403e-08,0.924258,0.000571568,-4.21038e-07,4.4276e-08,0.924829,0.000570859,-2.8821e-07,-3.49546e-08,0.9254,0.000570178,-3.93074e-07,3.59377e-08,0.92597,0.000569499,-2.85261e-07,-4.91915e-08,0.926539,0.000568781,-4.32835e-07,4.16189e-08,0.927107,0.00056804,-3.07979e-07,1.92523e-09,0.927675,0.00056743,-3.02203e-07,-4.93198e-08,0.928242,0.000566678,-4.50162e-07,7.61447e-08,0.928809,0.000566006,-2.21728e-07,-7.6445e-08,0.929374,0.000565333,-4.51063e-07,5.08216e-08,0.929939,0.000564583,-2.98599e-07,-7.63212e-09,0.930503,0.000563963,-3.21495e-07,-2.02931e-08,0.931067,0.000563259,-3.82374e-07,2.92001e-08,0.93163,0.000562582,-2.94774e-07,-3.69025e-08,0.932192,0.000561882,-4.05482e-07,5.88053e-08,0.932754,0.000561247,-2.29066e-07,-7.91094e-08,0.933315,0.000560552,-4.66394e-07,7.88184e-08,0.933875,0.000559856,-2.29939e-07,-5.73501e-08,0.934434,0.000559224,-4.01989e-07,3.13727e-08,0.934993,0.000558514,-3.07871e-07,-8.53611e-09,0.935551,0.000557873,-3.33479e-07,2.77175e-09,0.936109,0.000557214,-3.25164e-07,-2.55091e-09,0.936666,0.000556556,-3.32817e-07,7.43188e-09,0.937222,0.000555913,-3.10521e-07,-2.71766e-08,0.937778,0.00055521,-3.92051e-07,4.167e-08,0.938333,0.000554551,-2.67041e-07,-2.02941e-08,0.938887,0.000553956,-3.27923e-07,-2.00984e-08,0.93944,0.00055324,-3.88218e-07,4.10828e-08,0.939993,0.000552587,-2.6497e-07,-2.50237e-08,0.940546,0.000551982,-3.40041e-07,-5.92583e-10,0.941097,0.0005513,-3.41819e-07,2.7394e-08,0.941648,0.000550698,-2.59637e-07,-4.93
788e-08,0.942199,0.000550031,-4.07773e-07,5.09119e-08,0.942748,0.000549368,-2.55038e-07,-3.50595e-08,0.943297,0.000548753,-3.60216e-07,2.97214e-08,0.943846,0.000548122,-2.71052e-07,-2.42215e-08,0.944394,0.000547507,-3.43716e-07,7.55985e-09,0.944941,0.000546842,-3.21037e-07,-6.01796e-09,0.945487,0.000546182,-3.3909e-07,1.65119e-08,0.946033,0.000545553,-2.89555e-07,-4.2498e-10,0.946578,0.000544973,-2.9083e-07,-1.4812e-08,0.947123,0.000544347,-3.35266e-07,6.83068e-11,0.947667,0.000543676,-3.35061e-07,1.45388e-08,0.94821,0.00054305,-2.91444e-07,1.38123e-09,0.948753,0.000542471,-2.87301e-07,-2.00637e-08,0.949295,0.000541836,-3.47492e-07,1.92688e-08,0.949837,0.000541199,-2.89685e-07,2.59298e-09,0.950378,0.000540628,-2.81906e-07,-2.96407e-08,0.950918,0.000539975,-3.70829e-07,5.63652e-08,0.951458,0.000539402,-2.01733e-07,-7.66107e-08,0.951997,0.000538769,-4.31565e-07,7.12638e-08,0.952535,0.00053812,-2.17774e-07,-2.96305e-08,0.953073,0.000537595,-3.06665e-07,-1.23464e-08,0.95361,0.000536945,-3.43704e-07,1.94114e-08,0.954147,0.000536316,-2.8547e-07,-5.69451e-09,0.954683,0.000535728,-3.02554e-07,3.36666e-09,0.955219,0.000535133,-2.92454e-07,-7.77208e-09,0.955753,0.000534525,-3.1577e-07,2.77216e-08,0.956288,0.000533976,-2.32605e-07,-4.35097e-08,0.956821,0.00053338,-3.63134e-07,2.7108e-08,0.957354,0.000532735,-2.8181e-07,-5.31772e-09,0.957887,0.000532156,-2.97764e-07,-5.83718e-09,0.958419,0.000531543,-3.15275e-07,2.86664e-08,0.95895,0.000530998,-2.29276e-07,-4.9224e-08,0.959481,0.000530392,-3.76948e-07,4.90201e-08,0.960011,0.000529785,-2.29887e-07,-2.76471e-08,0.96054,0.000529243,-3.12829e-07,1.96385e-09,0.961069,0.000528623,-3.06937e-07,1.97917e-08,0.961598,0.000528068,-2.47562e-07,-2.15261e-08,0.962125,0.000527508,-3.1214e-07,6.70795e-09,0.962653,0.000526904,-2.92016e-07,-5.30573e-09,0.963179,0.000526304,-3.07934e-07,1.4515e-08,0.963705,0.000525732,-2.64389e-07,6.85048e-09,0.964231,0.000525224,-2.43837e-07,-4.19169e-08,0.964756,0.00052461,-3.69588e-07,4.1608e-08,0.96528,0.0005
23996,-2.44764e-07,-5.30598e-09,0.965804,0.000523491,-2.60682e-07,-2.03841e-08,0.966327,0.000522908,-3.21834e-07,2.72378e-08,0.966849,0.000522346,-2.40121e-07,-2.89625e-08,0.967371,0.000521779,-3.27008e-07,2.90075e-08,0.967893,0.000521212,-2.39986e-07,-2.74629e-08,0.968414,0.00052065,-3.22374e-07,2.12396e-08,0.968934,0.000520069,-2.58656e-07,2.10922e-09,0.969454,0.000519558,-2.52328e-07,-2.96765e-08,0.969973,0.000518964,-3.41357e-07,5.6992e-08,0.970492,0.000518452,-1.70382e-07,-7.90821e-08,0.97101,0.000517874,-4.07628e-07,8.05224e-08,0.971528,0.000517301,-1.66061e-07,-6.41937e-08,0.972045,0.000516776,-3.58642e-07,5.70429e-08,0.972561,0.00051623,-1.87513e-07,-4.47686e-08,0.973077,0.00051572,-3.21819e-07,2.82237e-09,0.973593,0.000515085,-3.13352e-07,3.34792e-08,0.974108,0.000514559,-2.12914e-07,-1.75298e-08,0.974622,0.000514081,-2.65503e-07,-2.29648e-08,0.975136,0.000513481,-3.34398e-07,4.97843e-08,0.975649,0.000512961,-1.85045e-07,-5.6963e-08,0.976162,0.00051242,-3.55934e-07,5.88585e-08,0.976674,0.000511885,-1.79359e-07,-5.92616e-08,0.977185,0.000511348,-3.57143e-07,5.89785e-08,0.977696,0.000510811,-1.80208e-07,-5.74433e-08,0.978207,0.000510278,-3.52538e-07,5.15854e-08,0.978717,0.000509728,-1.97781e-07,-2.9689e-08,0.979226,0.000509243,-2.86848e-07,7.56591e-09,0.979735,0.000508692,-2.64151e-07,-5.74649e-10,0.980244,0.000508162,-2.65875e-07,-5.26732e-09,0.980752,0.000507615,-2.81677e-07,2.16439e-08,0.981259,0.000507116,-2.16745e-07,-2.17037e-08,0.981766,0.000506618,-2.81856e-07,5.56636e-09,0.982272,0.000506071,-2.65157e-07,-5.61689e-10,0.982778,0.000505539,-2.66842e-07,-3.31963e-09,0.983283,0.000504995,-2.76801e-07,1.38402e-08,0.983788,0.000504483,-2.3528e-07,7.56339e-09,0.984292,0.000504035,-2.1259e-07,-4.40938e-08,0.984796,0.000503478,-3.44871e-07,4.96026e-08,0.985299,0.000502937,-1.96064e-07,-3.51071e-08,0.985802,0.000502439,-3.01385e-07,3.12212e-08,0.986304,0.00050193,-2.07721e-07,-3.0173e-08,0.986806,0.000501424,-2.9824e-07,2.9866e-08,0.987307,0.000500917,-2.08642
e-07,-2.96865e-08,0.987808,0.000500411,-2.97702e-07,2.92753e-08,0.988308,0.000499903,-2.09876e-07,-2.78101e-08,0.988807,0.0004994,-2.93306e-07,2.23604e-08,0.989307,0.000498881,-2.26225e-07,-2.02681e-09,0.989805,0.000498422,-2.32305e-07,-1.42531e-08,0.990303,0.000497915,-2.75065e-07,-5.65232e-10,0.990801,0.000497363,-2.76761e-07,1.65141e-08,0.991298,0.000496859,-2.27218e-07,-5.88639e-09,0.991795,0.000496387,-2.44878e-07,7.0315e-09,0.992291,0.000495918,-2.23783e-07,-2.22396e-08,0.992787,0.000495404,-2.90502e-07,2.23224e-08,0.993282,0.00049489,-2.23535e-07,-7.44543e-09,0.993776,0.000494421,-2.45871e-07,7.45924e-09,0.994271,0.000493951,-2.23493e-07,-2.23915e-08,0.994764,0.000493437,-2.90668e-07,2.25021e-08,0.995257,0.000492923,-2.23161e-07,-8.01218e-09,0.99575,0.000492453,-2.47198e-07,9.54669e-09,0.996242,0.000491987,-2.18558e-07,-3.01746e-08,0.996734,0.000491459,-3.09082e-07,5.1547e-08,0.997225,0.000490996,-1.54441e-07,-5.68039e-08,0.997716,0.000490517,-3.24853e-07,5.64594e-08,0.998206,0.000490036,-1.55474e-07,-4.98245e-08,0.998696,0.000489576,-3.04948e-07,2.36292e-08,0.999186,0.000489037,-2.3406e-07,1.49121e-08,0.999674,0.000488613,-1.89324e-07,-2.3673e-08,1.00016,0.000488164,-2.60343e-07,2.01754e-08,1.00065,0.000487704,-1.99816e-07,-5.70288e-08,1.00114,0.000487133,-3.70903e-07,8.87303e-08,1.00162,0.000486657,-1.04712e-07,-5.94737e-08,1.00211,0.000486269,-2.83133e-07,2.99553e-08,1.0026,0.000485793,-1.93267e-07,-6.03474e-08,1.00308,0.000485225,-3.74309e-07,9.2225e-08,1.00357,0.000484754,-9.76345e-08,-7.0134e-08,1.00405,0.000484348,-3.08036e-07,6.91016e-08,1.00454,0.000483939,-1.00731e-07,-8.70633e-08,1.00502,0.000483476,-3.61921e-07,4.07328e-08,1.0055,0.000482875,-2.39723e-07,4.33413e-08,1.00599,0.000482525,-1.09699e-07,-9.48886e-08,1.00647,0.000482021,-3.94365e-07,9.77947e-08,1.00695,0.000481526,-1.00981e-07,-5.78713e-08,1.00743,0.00048115,-2.74595e-07,1.44814e-08,1.00791,0.000480645,-2.31151e-07,-5.42665e-11,1.00839,0.000480182,-2.31314e-07,-1.42643e-08,1.00887,0.000
479677,-2.74106e-07,5.71115e-08,1.00935,0.0004793,-1.02772e-07,-9.49724e-08,1.00983,0.000478809,-3.87689e-07,8.43596e-08,1.01031,0.000478287,-1.3461e-07,-4.04755e-09,1.01079,0.000478006,-1.46753e-07,-6.81694e-08,1.01127,0.000477508,-3.51261e-07,3.83067e-08,1.01174,0.00047692,-2.36341e-07,3.41521e-08,1.01222,0.00047655,-1.33885e-07,-5.57058e-08,1.0127,0.000476115,-3.01002e-07,6.94616e-08,1.01317,0.000475721,-9.26174e-08,-1.02931e-07,1.01365,0.000475227,-4.01412e-07,1.03846e-07,1.01412,0.000474736,-8.98751e-08,-7.40321e-08,1.0146,0.000474334,-3.11971e-07,7.30735e-08,1.01507,0.00047393,-9.27508e-08,-9.90527e-08,1.01554,0.000473447,-3.89909e-07,8.47188e-08,1.01602,0.000472921,-1.35753e-07,-1.40381e-09,1.01649,0.000472645,-1.39964e-07,-7.91035e-08,1.01696,0.000472128,-3.77275e-07,7.93993e-08,1.01744,0.000471612,-1.39077e-07,-7.52607e-11,1.01791,0.000471334,-1.39302e-07,-7.90983e-08,1.01838,0.000470818,-3.76597e-07,7.80499e-08,1.01885,0.000470299,-1.42448e-07,5.31733e-09,1.01932,0.00047003,-1.26496e-07,-9.93193e-08,1.01979,0.000469479,-4.24453e-07,1.53541e-07,1.02026,0.00046909,3.617e-08,-1.57217e-07,1.02073,0.000468691,-4.35482e-07,1.177e-07,1.02119,0.000468173,-8.23808e-08,-7.51659e-08,1.02166,0.000467783,-3.07878e-07,6.37538e-08,1.02213,0.000467358,-1.16617e-07,-6.064e-08,1.0226,0.000466943,-2.98537e-07,5.9597e-08,1.02306,0.000466525,-1.19746e-07,-5.85386e-08,1.02353,0.00046611,-2.95362e-07,5.53482e-08,1.024,0.000465685,-1.29317e-07,-4.36449e-08,1.02446,0.000465296,-2.60252e-07,2.20268e-11,1.02493,0.000464775,-2.60186e-07,4.35568e-08,1.02539,0.000464386,-1.29516e-07,-5.50398e-08,1.02586,0.000463961,-2.94635e-07,5.73932e-08,1.02632,0.000463544,-1.22456e-07,-5.53236e-08,1.02678,0.000463133,-2.88426e-07,4.46921e-08,1.02725,0.000462691,-1.5435e-07,-4.23534e-09,1.02771,0.000462369,-1.67056e-07,-2.77507e-08,1.02817,0.000461952,-2.50308e-07,-3.97101e-09,1.02863,0.000461439,-2.62221e-07,4.36348e-08,1.02909,0.000461046,-1.31317e-07,-5.13589e-08,1.02955,0.000460629,-2.85394e-07,
4.25913e-08,1.03001,0.000460186,-1.5762e-07,2.0285e-10,1.03047,0.000459871,-1.57011e-07,-4.34027e-08,1.03093,0.000459427,-2.87219e-07,5.41987e-08,1.03139,0.000459015,-1.24623e-07,-5.4183e-08,1.03185,0.000458604,-2.87172e-07,4.33239e-08,1.03231,0.000458159,-1.572e-07,9.65817e-11,1.03277,0.000457845,-1.56911e-07,-4.37103e-08,1.03323,0.0004574,-2.88041e-07,5.55351e-08,1.03368,0.000456991,-1.21436e-07,-5.9221e-08,1.03414,0.00045657,-2.99099e-07,6.21394e-08,1.0346,0.000456158,-1.1268e-07,-7.01275e-08,1.03505,0.000455723,-3.23063e-07,9.91614e-08,1.03551,0.000455374,-2.55788e-08,-8.80996e-08,1.03596,0.000455058,-2.89878e-07,1.48184e-08,1.03642,0.000454523,-2.45422e-07,2.88258e-08,1.03687,0.000454119,-1.58945e-07,-1.09125e-08,1.03733,0.000453768,-1.91682e-07,1.48241e-08,1.03778,0.000453429,-1.4721e-07,-4.83838e-08,1.03823,0.00045299,-2.92361e-07,5.95019e-08,1.03869,0.000452584,-1.13856e-07,-7.04146e-08,1.03914,0.000452145,-3.25099e-07,1.02947e-07,1.03959,0.000451803,-1.62583e-08,-1.02955e-07,1.04004,0.000451462,-3.25123e-07,7.04544e-08,1.04049,0.000451023,-1.1376e-07,-5.96534e-08,1.04094,0.000450616,-2.9272e-07,4.89499e-08,1.04139,0.000450178,-1.45871e-07,-1.69369e-08,1.04184,0.000449835,-1.96681e-07,1.87977e-08,1.04229,0.000449498,-1.40288e-07,-5.82539e-08,1.04274,0.000449043,-3.1505e-07,9.50087e-08,1.04319,0.000448698,-3.00238e-08,-8.33623e-08,1.04364,0.000448388,-2.80111e-07,2.20363e-11,1.04409,0.000447828,-2.80045e-07,8.32742e-08,1.04454,0.000447517,-3.02221e-08,-9.47002e-08,1.04498,0.000447173,-3.14323e-07,5.7108e-08,1.04543,0.000446716,-1.42999e-07,-1.45225e-08,1.04588,0.000446386,-1.86566e-07,9.82022e-10,1.04632,0.000446016,-1.8362e-07,1.05944e-08,1.04677,0.00044568,-1.51837e-07,-4.33597e-08,1.04721,0.000445247,-2.81916e-07,4.36352e-08,1.04766,0.000444814,-1.51011e-07,-1.19717e-08,1.0481,0.000444476,-1.86926e-07,4.25158e-09,1.04855,0.000444115,-1.74171e-07,-5.03461e-09,1.04899,0.000443751,-1.89275e-07,1.58868e-08,1.04944,0.00044342,-1.41614e-07,-5.85127e-08,1.04988,0
.000442961,-3.17152e-07,9.89548e-08,1.05032,0.000442624,-2.0288e-08,-9.88878e-08,1.05076,0.000442287,-3.16951e-07,5.81779e-08,1.05121,0.000441827,-1.42418e-07,-1.46144e-08,1.05165,0.000441499,-1.86261e-07,2.79892e-10,1.05209,0.000441127,-1.85421e-07,1.34949e-08,1.05253,0.000440797,-1.44937e-07,-5.42594e-08,1.05297,0.000440344,-3.07715e-07,8.43335e-08,1.05341,0.000439982,-5.47146e-08,-4.46558e-08,1.05385,0.000439738,-1.88682e-07,-2.49193e-08,1.05429,0.000439286,-2.6344e-07,2.5124e-08,1.05473,0.000438835,-1.88068e-07,4.36328e-08,1.05517,0.000438589,-5.71699e-08,-8.04459e-08,1.05561,0.000438234,-2.98508e-07,3.97324e-08,1.05605,0.000437756,-1.79311e-07,4.07258e-08,1.05648,0.000437519,-5.71332e-08,-8.34263e-08,1.05692,0.000437155,-3.07412e-07,5.45608e-08,1.05736,0.000436704,-1.4373e-07,-1.56078e-08,1.05779,0.000436369,-1.90553e-07,7.87043e-09,1.05823,0.000436012,-1.66942e-07,-1.58739e-08,1.05867,0.00043563,-2.14563e-07,5.56251e-08,1.0591,0.000435368,-4.76881e-08,-8.74172e-08,1.05954,0.000435011,-3.0994e-07,5.56251e-08,1.05997,0.000434558,-1.43064e-07,-1.58739e-08,1.06041,0.000434224,-1.90686e-07,7.87042e-09,1.06084,0.000433866,-1.67075e-07,-1.56078e-08,1.06127,0.000433485,-2.13898e-07,5.45609e-08,1.06171,0.000433221,-5.02157e-08,-8.34263e-08,1.06214,0.00043287,-3.00495e-07,4.07258e-08,1.06257,0.000432391,-1.78317e-07,3.97325e-08,1.063,0.000432154,-5.91198e-08,-8.04464e-08,1.06344,0.000431794,-3.00459e-07,4.36347e-08,1.06387,0.000431324,-1.69555e-07,2.5117e-08,1.0643,0.000431061,-9.42041e-08,-2.48934e-08,1.06473,0.000430798,-1.68884e-07,-4.47527e-08,1.06516,0.000430326,-3.03142e-07,8.46951e-08,1.06559,0.000429973,-4.90573e-08,-5.56089e-08,1.06602,0.000429708,-2.15884e-07,1.85314e-08,1.06645,0.000429332,-1.6029e-07,-1.85166e-08,1.06688,0.000428956,-2.1584e-07,5.5535e-08,1.06731,0.000428691,-4.92347e-08,-8.44142e-08,1.06774,0.000428339,-3.02477e-07,4.37032e-08,1.06816,0.000427865,-1.71368e-07,2.88107e-08,1.06859,0.000427609,-8.49356e-08,-3.97367e-08,1.06902,0.00042732,-2.04
146e-07,1.09267e-08,1.06945,0.000426945,-1.71365e-07,-3.97023e-09,1.06987,0.00042659,-1.83276e-07,4.9542e-09,1.0703,0.000426238,-1.68414e-07,-1.58466e-08,1.07073,0.000425854,-2.15953e-07,5.84321e-08,1.07115,0.000425597,-4.0657e-08,-9.86725e-08,1.07158,0.00042522,-3.36674e-07,9.78392e-08,1.072,0.00042484,-4.31568e-08,-5.42658e-08,1.07243,0.000424591,-2.05954e-07,1.45377e-11,1.07285,0.000424179,-2.0591e-07,5.42076e-08,1.07328,0.00042393,-4.32877e-08,-9.76357e-08,1.0737,0.00042355,-3.36195e-07,9.79165e-08,1.07412,0.000423172,-4.24451e-08,-5.56118e-08,1.07455,0.00042292,-2.09281e-07,5.32143e-09,1.07497,0.000422518,-1.93316e-07,3.43261e-08,1.07539,0.000422234,-9.0338e-08,-2.34165e-08,1.07581,0.000421983,-1.60588e-07,-5.98692e-08,1.07623,0.000421482,-3.40195e-07,1.43684e-07,1.07666,0.000421233,9.08574e-08,-1.5724e-07,1.07708,0.000420943,-3.80862e-07,1.27647e-07,1.0775,0.000420564,2.0791e-09,-1.1493e-07,1.07792,0.000420223,-3.4271e-07,9.36534e-08,1.07834,0.000419819,-6.17499e-08,-2.12653e-08,1.07876,0.000419632,-1.25546e-07,-8.59219e-09,1.07918,0.000419355,-1.51322e-07,-6.35752e-08,1.0796,0.000418861,-3.42048e-07,1.43684e-07,1.08002,0.000418608,8.90034e-08,-1.53532e-07,1.08043,0.000418326,-3.71593e-07,1.12817e-07,1.08085,0.000417921,-3.31414e-08,-5.93184e-08,1.08127,0.000417677,-2.11097e-07,5.24697e-09,1.08169,0.00041727,-1.95356e-07,3.83305e-08,1.0821,0.000416995,-8.03642e-08,-3.93597e-08,1.08252,0.000416716,-1.98443e-07,-1.0094e-10,1.08294,0.000416319,-1.98746e-07,3.97635e-08,1.08335,0.00041604,-7.94557e-08,-3.97437e-08,1.08377,0.000415762,-1.98687e-07,1.94215e-12,1.08419,0.000415365,-1.98681e-07,3.97359e-08,1.0846,0.000415087,-7.94732e-08,-3.97362e-08,1.08502,0.000414809,-1.98682e-07,-4.31063e-13,1.08543,0.000414411,-1.98683e-07,3.97379e-08,1.08584,0.000414133,-7.94694e-08,-3.97418e-08,1.08626,0.000413855,-1.98695e-07,2.00563e-11,1.08667,0.000413458,-1.98635e-07,3.96616e-08,1.08709,0.000413179,-7.965e-08,-3.9457e-08,1.0875,0.000412902,-1.98021e-07,-1.04281e-09,1.08791,0
.000412502,-2.01149e-07,4.36282e-08,1.08832,0.000412231,-7.02648e-08,-5.42608e-08,1.08874,0.000411928,-2.33047e-07,5.42057e-08,1.08915,0.000411624,-7.04301e-08,-4.33527e-08,1.08956,0.000411353,-2.00488e-07,-4.07378e-12,1.08997,0.000410952,-2.005e-07,4.3369e-08,1.09038,0.000410681,-7.03934e-08,-5.42627e-08,1.09079,0.000410378,-2.33182e-07,5.44726e-08,1.0912,0.000410075,-6.97637e-08,-4.44186e-08,1.09161,0.000409802,-2.03019e-07,3.99235e-09,1.09202,0.000409408,-1.91042e-07,2.84491e-08,1.09243,0.000409111,-1.05695e-07,1.42043e-09,1.09284,0.000408904,-1.01434e-07,-3.41308e-08,1.09325,0.000408599,-2.03826e-07,1.58937e-08,1.09366,0.000408239,-1.56145e-07,-2.94438e-08,1.09406,0.000407838,-2.44476e-07,1.01881e-07,1.09447,0.000407655,6.11676e-08,-1.39663e-07,1.09488,0.000407358,-3.57822e-07,9.91432e-08,1.09529,0.00040694,-6.03921e-08,-1.84912e-08,1.09569,0.000406764,-1.15866e-07,-2.51785e-08,1.0961,0.000406457,-1.91401e-07,-4.03115e-12,1.09651,0.000406074,-1.91413e-07,2.51947e-08,1.09691,0.000405767,-1.15829e-07,1.84346e-08,1.09732,0.00040559,-6.05254e-08,-9.89332e-08,1.09772,0.000405172,-3.57325e-07,1.3888e-07,1.09813,0.000404874,5.93136e-08,-9.8957e-08,1.09853,0.000404696,-2.37557e-07,1.853e-08,1.09894,0.000404277,-1.81968e-07,2.48372e-08,1.09934,0.000403987,-1.07456e-07,1.33047e-09,1.09975,0.000403776,-1.03465e-07,-3.01591e-08,1.10015,0.000403479,-1.93942e-07,9.66054e-11,1.10055,0.000403091,-1.93652e-07,2.97727e-08,1.10096,0.000402793,-1.04334e-07,2.19273e-11,1.10136,0.000402585,-1.04268e-07,-2.98604e-08,1.10176,0.000402287,-1.93849e-07,2.10325e-10,1.10216,0.0004019,-1.93218e-07,2.90191e-08,1.10256,0.0004016,-1.06161e-07,2.92264e-09,1.10297,0.000401397,-9.73931e-08,-4.07096e-08,1.10337,0.00040108,-2.19522e-07,4.07067e-08,1.10377,0.000400763,-9.7402e-08,-2.90783e-09,1.10417,0.000400559,-1.06126e-07,-2.90754e-08,1.10457,0.00040026,-1.93352e-07,9.00021e-14,1.10497,0.000399873,-1.93351e-07,2.9075e-08,1.10537,0.000399574,-1.06126e-07,2.90902e-09,1.10577,0.00039937,-9.73992e-08,
-4.07111e-08,1.10617,0.000399053,-2.19533e-07,4.07262e-08,1.10657,0.000398736,-9.73541e-08,-2.98424e-09,1.10697,0.000398533,-1.06307e-07,-2.87892e-08,1.10736,0.000398234,-1.92674e-07,-1.06824e-09,1.10776,0.000397845,-1.95879e-07,3.30622e-08,1.10816,0.000397552,-9.66926e-08,-1.19712e-08,1.10856,0.000397323,-1.32606e-07,1.48225e-08,1.10895,0.000397102,-8.81387e-08,-4.73187e-08,1.10935,0.000396784,-2.30095e-07,5.52429e-08,1.10975,0.00039649,-6.4366e-08,-5.44437e-08,1.11014,0.000396198,-2.27697e-07,4.33226e-08,1.11054,0.000395872,-9.77293e-08,3.62656e-10,1.11094,0.000395678,-9.66414e-08,-4.47732e-08,1.11133,0.00039535,-2.30961e-07,5.95208e-08,1.11173,0.000395067,-5.23985e-08,-7.41008e-08,1.11212,0.00039474,-2.74701e-07,1.17673e-07,1.11252,0.000394543,7.83181e-08,-1.58172e-07,1.11291,0.000394225,-3.96199e-07,1.57389e-07,1.1133,0.000393905,7.59679e-08,-1.13756e-07,1.1137,0.000393716,-2.653e-07,5.92165e-08,1.11409,0.000393363,-8.76507e-08,-3.90074e-09,1.11449,0.000393176,-9.93529e-08,-4.36136e-08,1.11488,0.000392846,-2.30194e-07,5.91457e-08,1.11527,0.000392563,-5.27564e-08,-7.376e-08,1.11566,0.000392237,-2.74037e-07,1.16685e-07,1.11606,0.000392039,7.60189e-08,-1.54562e-07,1.11645,0.000391727,-3.87667e-07,1.43935e-07,1.11684,0.000391384,4.4137e-08,-6.35487e-08,1.11723,0.000391281,-1.46509e-07,-8.94896e-09,1.11762,0.000390961,-1.73356e-07,-1.98647e-08,1.11801,0.000390555,-2.3295e-07,8.8408e-08,1.1184,0.000390354,3.22736e-08,-9.53486e-08,1.11879,0.000390133,-2.53772e-07,5.45677e-08,1.11918,0.000389789,-9.0069e-08,-3.71296e-09,1.11957,0.000389598,-1.01208e-07,-3.97159e-08,1.11996,0.000389276,-2.20355e-07,4.33671e-08,1.12035,0.000388966,-9.02542e-08,-1.45431e-08,1.12074,0.000388741,-1.33883e-07,1.48052e-08,1.12113,0.000388518,-8.94678e-08,-4.46778e-08,1.12152,0.000388205,-2.23501e-07,4.46966e-08,1.12191,0.000387892,-8.94114e-08,-1.48992e-08,1.12229,0.000387669,-1.34109e-07,1.49003e-08,1.12268,0.000387445,-8.94082e-08,-4.47019e-08,1.12307,0.000387132,-2.23514e-07,4.4698e-08,1.12
345,0.000386819,-8.942e-08,-1.48806e-08,1.12384,0.000386596,-1.34062e-07,1.48245e-08,1.12423,0.000386372,-8.95885e-08,-4.44172e-08,1.12461,0.00038606,-2.2284e-07,4.36351e-08,1.125,0.000385745,-9.19348e-08,-1.09139e-08,1.12539,0.000385528,-1.24677e-07,2.05584e-11,1.12577,0.000385279,-1.24615e-07,1.08317e-08,1.12616,0.000385062,-9.21198e-08,-4.33473e-08,1.12654,0.000384748,-2.22162e-07,4.33481e-08,1.12693,0.000384434,-9.21174e-08,-1.08356e-08,1.12731,0.000384217,-1.24624e-07,-5.50907e-12,1.12769,0.000383968,-1.24641e-07,1.08577e-08,1.12808,0.000383751,-9.20679e-08,-4.34252e-08,1.12846,0.000383437,-2.22343e-07,4.36337e-08,1.12884,0.000383123,-9.14422e-08,-1.19005e-08,1.12923,0.000382904,-1.27144e-07,3.96813e-09,1.12961,0.000382662,-1.15239e-07,-3.97207e-09,1.12999,0.000382419,-1.27155e-07,1.19201e-08,1.13038,0.000382201,-9.1395e-08,-4.37085e-08,1.13076,0.000381887,-2.2252e-07,4.37046e-08,1.13114,0.000381573,-9.14068e-08,-1.19005e-08,1.13152,0.000381355,-1.27108e-07,3.89734e-09,1.1319,0.000381112,-1.15416e-07,-3.68887e-09,1.13228,0.00038087,-1.26483e-07,1.08582e-08,1.13266,0.00038065,-9.39083e-08,-3.97438e-08,1.13304,0.000380343,-2.1314e-07,2.89076e-08,1.13342,0.000380003,-1.26417e-07,4.33225e-08,1.1338,0.00037988,3.55072e-09,-8.29883e-08,1.13418,0.000379638,-2.45414e-07,5.0212e-08,1.13456,0.000379298,-9.47781e-08,1.34964e-09,1.13494,0.000379113,-9.07292e-08,-5.56105e-08,1.13532,0.000378764,-2.57561e-07,1.01883e-07,1.1357,0.000378555,4.80889e-08,-1.13504e-07,1.13608,0.000378311,-2.92423e-07,1.13713e-07,1.13646,0.000378067,4.87176e-08,-1.02931e-07,1.13683,0.000377856,-2.60076e-07,5.95923e-08,1.13721,0.000377514,-8.12988e-08,-1.62288e-08,1.13759,0.000377303,-1.29985e-07,5.32278e-09,1.13797,0.000377059,-1.14017e-07,-5.06237e-09,1.13834,0.000376816,-1.29204e-07,1.49267e-08,1.13872,0.000376602,-8.44237e-08,-5.46444e-08,1.1391,0.000376269,-2.48357e-07,8.44417e-08,1.13947,0.000376026,4.96815e-09,-4.47039e-08,1.13985,0.000375902,-1.29143e-07,-2.48355e-08,1.14023,0.000375569,-2.
0365e-07,2.48368e-08,1.1406,0.000375236,-1.2914e-07,4.46977e-08,1.14098,0.000375112,4.95341e-09,-8.44184e-08,1.14135,0.000374869,-2.48302e-07,5.45572e-08,1.14173,0.000374536,-8.463e-08,-1.46013e-08,1.1421,0.000374323,-1.28434e-07,3.8478e-09,1.14247,0.000374077,-1.1689e-07,-7.89941e-10,1.14285,0.000373841,-1.1926e-07,-6.88042e-10,1.14322,0.0003736,-1.21324e-07,3.54213e-09,1.1436,0.000373368,-1.10698e-07,-1.34805e-08,1.14397,0.000373107,-1.51139e-07,5.03798e-08,1.14434,0.000372767,0.,0.};
+
+ // Convert one RGB(A) pixel with float channels in [0,1] to CIE L*u*v*.
+ // blueIdx == 0 means the source is BGR-ordered; otherwise RGB.
+ // srgb == true first linearizes the channels through the sRGB gamma
+ // spline table (c_sRGBGammaTab).
+ template <bool srgb, int blueIdx, typename T, typename D>
+ __device__ __forceinline__ void RGB2LuvConvert_32F(const T& src, D& dst)
+ {
+ // White-point terms (presumably D65: Xn=0.950456, Zn=1.088754 --
+ // TODO confirm against OpenCV's CPU cvtColor). _un/_vn fold in the
+ // factor 13 that the 'd' denominator term below also carries (4*13).
+ const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3);
+ const float _un = 13 * (4 * 0.950456f * _d);
+ const float _vn = 13 * (9 * _d);
+
+ // Unpack channels honoring the requested blue-channel position.
+ float B = blueIdx == 0 ? src.x : src.z;
+ float G = src.y;
+ float R = blueIdx == 0 ? src.z : src.x;
+
+ if (srgb)
+ {
+ // Undo sRGB gamma so the matrix below operates on linear light.
+ B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+ G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+ R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBGammaTab, GAMMA_TAB_SIZE);
+ }
+
+ // Linear RGB -> CIE XYZ (constants match the sRGB/D65 matrix -- verify).
+ float X = R * 0.412453f + G * 0.357580f + B * 0.180423f;
+ float Y = R * 0.212671f + G * 0.715160f + B * 0.072169f;
+ float Z = R * 0.019334f + G * 0.119193f + B * 0.950227f;
+
+ // Lightness via the cube-root spline table, then L = 116*Y^(1/3) - 16.
+ float L = splineInterpolate(Y * (LAB_CBRT_TAB_SIZE / 1.5f), c_LabCbrtTab, LAB_CBRT_TAB_SIZE);
+ L = 116.f * L - 16.f;
+
+ // epsilon() in the denominator guards against division by zero for
+ // pure-black pixels (X = Y = Z = 0).
+ const float d = (4 * 13) / ::fmaxf(X + 15 * Y + 3 * Z, numeric_limits<float>::epsilon());
+ float u = L * (X * d - _un);
+ float v = L * ((9 * 0.25f) * Y * d - _vn);
+
+ dst.x = L;
+ dst.y = u;
+ dst.z = v;
+ }
+
+ // 8-bit entry point: widen the byte pixel to normalized floats, run the
+ // 32F conversion, then requantize L/u/v into byte range (L scaled by
+ // 2.55; u and v affine-mapped into [0,255]).
+ template <bool srgb, int blueIdx, typename T, typename D>
+ __device__ __forceinline__ void RGB2LuvConvert_8U(const T& src, D& dst)
+ {
+ float3 rgbf;
+ float3 luvf;
+
+ rgbf.x = src.x * (1.f / 255.f);
+ rgbf.y = src.y * (1.f / 255.f);
+ rgbf.z = src.z * (1.f / 255.f);
+
+ RGB2LuvConvert_32F<srgb, blueIdx>(rgbf, luvf);
+
+ dst.x = saturate_cast<uchar>(luvf.x * 2.55f);
+ dst.y = saturate_cast<uchar>(luvf.y * 0.72033898305084743f + 96.525423728813564f);
+ dst.z = saturate_cast<uchar>(luvf.z * 0.9732824427480916f + 136.259541984732824f);
+ }
+
+ // Primary template; specialized below for uchar and float pixel types.
+ template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct RGB2Luv;
+
+ // Per-pixel functor for 8-bit images: forwards to RGB2LuvConvert_8U.
+ template <int scn, int dcn, bool srgb, int blueIdx>
+ struct RGB2Luv<uchar, scn, dcn, srgb, blueIdx>
+ : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+ {
+ typename TypeVec<uchar, dcn>::vec_type out;
+ RGB2LuvConvert_8U<srgb, blueIdx>(src, out);
+ return out;
+ }
+
+ // Trivial constructors spelled out so the functor is constructible
+ // from both host and device code.
+ __host__ __device__ __forceinline__ RGB2Luv() {}
+ __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}
+ };
+
+ // Per-pixel functor for 32-bit float images: forwards to RGB2LuvConvert_32F.
+ template <int scn, int dcn, bool srgb, int blueIdx>
+ struct RGB2Luv<float, scn, dcn, srgb, blueIdx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type out;
+ RGB2LuvConvert_32F<srgb, blueIdx>(src, out);
+ return out;
+ }
+
+ // Trivial constructors spelled out so the functor is constructible
+ // from both host and device code.
+ __host__ __device__ __forceinline__ RGB2Luv() {}
+ __host__ __device__ __forceinline__ RGB2Luv(const RGB2Luv&) {}
+ };
+ }
+
+// Generates a <name>_traits adapter struct that exposes the RGB2Luv
+// functor (parameterized on source/destination channel counts, the sRGB
+// flag, and the blue-channel index) through the create_functor() factory
+// used by the GPU color-conversion kernels. Comments cannot go inside
+// the macro body: a // comment would swallow the trailing backslash.
+#define OPENCV_GPU_IMPLEMENT_RGB2Luv_TRAITS(name, scn, dcn, srgb, blueIdx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::RGB2Luv<T, scn, dcn, srgb, blueIdx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ namespace color_detail
+ {
+ // Convert one CIE L*u*v* pixel (float channels) back to RGB(A) --
+ // inverse of RGB2LuvConvert_32F. blueIdx selects BGR vs RGB output
+ // order; srgb re-applies gamma via the inverse-gamma spline table.
+ template <bool srgb, int blueIdx, typename T, typename D>
+ __device__ __forceinline__ void Luv2RGBConvert_32F(const T& src, D& dst)
+ {
+ // White-point terms (presumably D65: Xn=0.950456, Zn=1.088754 --
+ // TODO confirm against OpenCV's CPU cvtColor implementation).
+ const float _d = 1.f / (0.950456f + 15 + 1.088754f * 3);
+ const float _un = 4 * 0.950456f * _d;
+ const float _vn = 9 * _d;
+
+ float L = src.x;
+ float u = src.y;
+ float v = src.z;
+
+ // Invert L = 116 * Y^(1/3) - 16.
+ float Y = (L + 16.f) * (1.f / 116.f);
+ Y = Y * Y * Y;
+
+ // NOTE(review): unlike the forward path (which clamps its denominator
+ // with epsilon()), d is infinite when L == 0, so black pixels rely on
+ // IEEE inf/nan propagation through the products below -- verify this
+ // matches the CPU path's behavior for pure-black input.
+ float d = (1.f / 13.f) / L;
+ u = u * d + _un;
+ v = v * d + _vn;
+
+ // Recover X and Z from Y and the u'v' chromaticity.
+ float iv = 1.f / v;
+ float X = 2.25f * u * Y * iv;
+ float Z = (12 - 3 * u - 20 * v) * Y * 0.25f * iv;
+
+ // XYZ -> linear RGB (inverse of the matrix used in RGB2LuvConvert_32F).
+ float B = 0.055648f * X - 0.204043f * Y + 1.057311f * Z;
+ float G = -0.969256f * X + 1.875991f * Y + 0.041556f * Z;
+ float R = 3.240479f * X - 1.537150f * Y - 0.498535f * Z;
+
+ if (srgb)
+ {
+ B = splineInterpolate(B * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+ G = splineInterpolate(G * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+ R = splineInterpolate(R * GAMMA_TAB_SIZE, c_sRGBInvGammaTab, GAMMA_TAB_SIZE);
+ }
+
+ // Repack channels honoring the requested blue-channel position; a
+ // 4-channel destination gets an opaque alpha.
+ dst.x = blueIdx == 0 ? B : R;
+ dst.y = G;
+ dst.z = blueIdx == 0 ? R : B;
+ setAlpha(dst, ColorChannel<float>::max());
+ }
+
+ // 8-bit entry point: undo the byte packing used by RGB2LuvConvert_8U
+ // (L -> [0,100], u -> [-134,220], v -> [-140,122]), convert, then
+ // scale linear RGB from [0,1] back to bytes with saturation.
+ template <bool srgb, int blueIdx, typename T, typename D>
+ __device__ __forceinline__ void Luv2RGBConvert_8U(const T& src, D& dst)
+ {
+ float3 srcf, dstf;
+
+ srcf.x = src.x * (100.f / 255.f);
+ srcf.y = src.y * 1.388235294117647f - 134.f;
+ srcf.z = src.z * 1.027450980392157f - 140.f;
+
+ Luv2RGBConvert_32F<srgb, blueIdx>(srcf, dstf);
+
+ dst.x = saturate_cast<uchar>(dstf.x * 255.f);
+ dst.y = saturate_cast<uchar>(dstf.y * 255.f);
+ dst.z = saturate_cast<uchar>(dstf.z * 255.f);
+ setAlpha(dst, ColorChannel<uchar>::max());
+ }
+
+ // Primary template; specialized below for uchar and float pixel types.
+ template <typename T, int scn, int dcn, bool srgb, int blueIdx> struct Luv2RGB;
+
+ // Per-pixel functor for 8-bit images: forwards to Luv2RGBConvert_8U.
+ template <int scn, int dcn, bool srgb, int blueIdx>
+ struct Luv2RGB<uchar, scn, dcn, srgb, blueIdx>
+ : unary_function<typename TypeVec<uchar, scn>::vec_type, typename TypeVec<uchar, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<uchar, dcn>::vec_type operator ()(const typename TypeVec<uchar, scn>::vec_type& src) const
+ {
+ typename TypeVec<uchar, dcn>::vec_type dst;
+
+ Luv2RGBConvert_8U<srgb, blueIdx>(src, dst);
+
+ return dst;
+ }
+
+ // Trivial constructors spelled out so the functor is constructible
+ // from both host and device code.
+ __host__ __device__ __forceinline__ Luv2RGB() {}
+ __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
+ };
+
+ // Per-pixel functor for 32-bit float images: forwards to Luv2RGBConvert_32F.
+ template <int scn, int dcn, bool srgb, int blueIdx>
+ struct Luv2RGB<float, scn, dcn, srgb, blueIdx>
+ : unary_function<typename TypeVec<float, scn>::vec_type, typename TypeVec<float, dcn>::vec_type>
+ {
+ __device__ __forceinline__ typename TypeVec<float, dcn>::vec_type operator ()(const typename TypeVec<float, scn>::vec_type& src) const
+ {
+ typename TypeVec<float, dcn>::vec_type dst;
+
+ Luv2RGBConvert_32F<srgb, blueIdx>(src, dst);
+
+ return dst;
+ }
+
+ // Trivial constructors spelled out so the functor is constructible
+ // from both host and device code.
+ __host__ __device__ __forceinline__ Luv2RGB() {}
+ __host__ __device__ __forceinline__ Luv2RGB(const Luv2RGB&) {}
+ };
+ }
+
+// Generates a <name>_traits adapter struct that exposes the Luv2RGB
+// functor (parameterized on source/destination channel counts, the sRGB
+// flag, and the blue-channel index) through the create_functor() factory
+// used by the GPU color-conversion kernels. Comments cannot go inside
+// the macro body: a // comment would swallow the trailing backslash.
+#define OPENCV_GPU_IMPLEMENT_Luv2RGB_TRAITS(name, scn, dcn, srgb, blueIdx) \
+ template <typename T> struct name ## _traits \
+ { \
+ typedef ::cv::gpu::device::color_detail::Luv2RGB<T, scn, dcn, srgb, blueIdx> functor_type; \
+ static __host__ __device__ __forceinline__ functor_type create_functor() \
+ { \
+ return functor_type(); \
+ } \
+ };
+
+ #undef CV_DESCALE
+
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_COLOR_DETAIL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp
new file mode 100644
index 00000000..091a160e
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce.hpp
@@ -0,0 +1,361 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_REDUCE_DETAIL_HPP__
+#define __OPENCV_GPU_REDUCE_DETAIL_HPP__
+
+#include <thrust/tuple.h>
+#include "../warp.hpp"
+#include "../warp_shuffle.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ namespace reduce_detail
+ {
+ template <typename T> struct GetType;
+ template <typename T> struct GetType<T*>
+ {
+ typedef T type;
+ };
+ template <typename T> struct GetType<volatile T*>
+ {
+ typedef T type;
+ };
+ template <typename T> struct GetType<T&>
+ {
+ typedef T type;
+ };
+
+ template <unsigned int I, unsigned int N>
+ struct For
+ {
+ template <class PointerTuple, class ValTuple>
+ static __device__ void loadToSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
+ {
+ thrust::get<I>(smem)[tid] = thrust::get<I>(val);
+
+ For<I + 1, N>::loadToSmem(smem, val, tid);
+ }
+ template <class PointerTuple, class ValTuple>
+ static __device__ void loadFromSmem(const PointerTuple& smem, const ValTuple& val, unsigned int tid)
+ {
+ thrust::get<I>(val) = thrust::get<I>(smem)[tid];
+
+ For<I + 1, N>::loadFromSmem(smem, val, tid);
+ }
+
+ template <class PointerTuple, class ValTuple, class OpTuple>
+ static __device__ void merge(const PointerTuple& smem, const ValTuple& val, unsigned int tid, unsigned int delta, const OpTuple& op)
+ {
+ typename GetType<typename thrust::tuple_element<I, PointerTuple>::type>::type reg = thrust::get<I>(smem)[tid + delta];
+ thrust::get<I>(smem)[tid] = thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);
+
+ For<I + 1, N>::merge(smem, val, tid, delta, op);
+ }
+ template <class ValTuple, class OpTuple>
+ static __device__ void mergeShfl(const ValTuple& val, unsigned int delta, unsigned int width, const OpTuple& op)
+ {
+ typename GetType<typename thrust::tuple_element<I, ValTuple>::type>::type reg = shfl_down(thrust::get<I>(val), delta, width);
+ thrust::get<I>(val) = thrust::get<I>(op)(thrust::get<I>(val), reg);
+
+ For<I + 1, N>::mergeShfl(val, delta, width, op);
+ }
+ };
+ template <unsigned int N>
+ struct For<N, N>
+ {
+ template <class PointerTuple, class ValTuple>
+ static __device__ void loadToSmem(const PointerTuple&, const ValTuple&, unsigned int)
+ {
+ }
+ template <class PointerTuple, class ValTuple>
+ static __device__ void loadFromSmem(const PointerTuple&, const ValTuple&, unsigned int)
+ {
+ }
+
+ template <class PointerTuple, class ValTuple, class OpTuple>
+ static __device__ void merge(const PointerTuple&, const ValTuple&, unsigned int, unsigned int, const OpTuple&)
+ {
+ }
+ template <class ValTuple, class OpTuple>
+ static __device__ void mergeShfl(const ValTuple&, unsigned int, unsigned int, const OpTuple&)
+ {
+ }
+ };
+
+ template <typename T>
+ __device__ __forceinline__ void loadToSmem(volatile T* smem, T& val, unsigned int tid)
+ {
+ smem[tid] = val;
+ }
+ template <typename T>
+ __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& val, unsigned int tid)
+ {
+ val = smem[tid];
+ }
+ template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
+ typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
+ __device__ __forceinline__ void loadToSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
+ const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
+ unsigned int tid)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadToSmem(smem, val, tid);
+ }
+ template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
+ typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9>
+ __device__ __forceinline__ void loadFromSmem(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
+ const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
+ unsigned int tid)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::loadFromSmem(smem, val, tid);
+ }
+
+ template <typename T, class Op>
+ __device__ __forceinline__ void merge(volatile T* smem, T& val, unsigned int tid, unsigned int delta, const Op& op)
+ {
+ T reg = smem[tid + delta];
+ smem[tid] = val = op(val, reg);
+ }
+ template <typename T, class Op>
+ __device__ __forceinline__ void mergeShfl(T& val, unsigned int delta, unsigned int width, const Op& op)
+ {
+ T reg = shfl_down(val, delta, width);
+ val = op(val, reg);
+ }
+ template <typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
+ typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
+ class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
+ __device__ __forceinline__ void merge(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
+ const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
+ unsigned int tid,
+ unsigned int delta,
+ const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9> >::value>::merge(smem, val, tid, delta, op);
+ }
+ template <typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
+ class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
+ __device__ __forceinline__ void mergeShfl(const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
+ unsigned int delta,
+ unsigned int width,
+ const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9> >::value>::mergeShfl(val, delta, width, op);
+ }
+
+ template <unsigned int N> struct Generic
+ {
+ template <typename Pointer, typename Reference, class Op>
+ static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
+ {
+ loadToSmem(smem, val, tid);
+ if (N >= 32)
+ __syncthreads();
+
+ if (N >= 2048)
+ {
+ if (tid < 1024)
+ merge(smem, val, tid, 1024, op);
+
+ __syncthreads();
+ }
+ if (N >= 1024)
+ {
+ if (tid < 512)
+ merge(smem, val, tid, 512, op);
+
+ __syncthreads();
+ }
+ if (N >= 512)
+ {
+ if (tid < 256)
+ merge(smem, val, tid, 256, op);
+
+ __syncthreads();
+ }
+ if (N >= 256)
+ {
+ if (tid < 128)
+ merge(smem, val, tid, 128, op);
+
+ __syncthreads();
+ }
+ if (N >= 128)
+ {
+ if (tid < 64)
+ merge(smem, val, tid, 64, op);
+
+ __syncthreads();
+ }
+ if (N >= 64)
+ {
+ if (tid < 32)
+ merge(smem, val, tid, 32, op);
+ }
+
+ if (tid < 16)
+ {
+ merge(smem, val, tid, 16, op);
+ merge(smem, val, tid, 8, op);
+ merge(smem, val, tid, 4, op);
+ merge(smem, val, tid, 2, op);
+ merge(smem, val, tid, 1, op);
+ }
+ }
+ };
+
+ template <unsigned int I, typename Pointer, typename Reference, class Op>
+ struct Unroll
+ {
+ static __device__ void loopShfl(Reference val, Op op, unsigned int N)
+ {
+ mergeShfl(val, I, N, op);
+ Unroll<I / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
+ }
+ static __device__ void loop(Pointer smem, Reference val, unsigned int tid, Op op)
+ {
+ merge(smem, val, tid, I, op);
+ Unroll<I / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
+ }
+ };
+ template <typename Pointer, typename Reference, class Op>
+ struct Unroll<0, Pointer, Reference, Op>
+ {
+ static __device__ void loopShfl(Reference, Op, unsigned int)
+ {
+ }
+ static __device__ void loop(Pointer, Reference, unsigned int, Op)
+ {
+ }
+ };
+
+ template <unsigned int N> struct WarpOptimized
+ {
+ template <typename Pointer, typename Reference, class Op>
+ static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
+ {
+ #if __CUDA_ARCH__ >= 300
+ (void) smem;
+ (void) tid;
+
+ Unroll<N / 2, Pointer, Reference, Op>::loopShfl(val, op, N);
+ #else
+ loadToSmem(smem, val, tid);
+
+ if (tid < N / 2)
+ Unroll<N / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
+ #endif
+ }
+ };
+
+ template <unsigned int N> struct GenericOptimized32
+ {
+ enum { M = N / 32 };
+
+ template <typename Pointer, typename Reference, class Op>
+ static __device__ void reduce(Pointer smem, Reference val, unsigned int tid, Op op)
+ {
+ const unsigned int laneId = Warp::laneId();
+
+ #if __CUDA_ARCH__ >= 300
+ Unroll<16, Pointer, Reference, Op>::loopShfl(val, op, warpSize);
+
+ if (laneId == 0)
+ loadToSmem(smem, val, tid / 32);
+ #else
+ loadToSmem(smem, val, tid);
+
+ if (laneId < 16)
+ Unroll<16, Pointer, Reference, Op>::loop(smem, val, tid, op);
+
+ __syncthreads();
+
+ if (laneId == 0)
+ loadToSmem(smem, val, tid / 32);
+ #endif
+
+ __syncthreads();
+
+ loadFromSmem(smem, val, tid);
+
+ if (tid < 32)
+ {
+ #if __CUDA_ARCH__ >= 300
+ Unroll<M / 2, Pointer, Reference, Op>::loopShfl(val, op, M);
+ #else
+ Unroll<M / 2, Pointer, Reference, Op>::loop(smem, val, tid, op);
+ #endif
+ }
+ }
+ };
+
+ template <bool val, class T1, class T2> struct StaticIf;
+ template <class T1, class T2> struct StaticIf<true, T1, T2>
+ {
+ typedef T1 type;
+ };
+ template <class T1, class T2> struct StaticIf<false, T1, T2>
+ {
+ typedef T2 type;
+ };
+
+ template <unsigned int N> struct IsPowerOf2
+ {
+ enum { value = ((N != 0) && !(N & (N - 1))) };
+ };
+
+ template <unsigned int N> struct Dispatcher
+ {
+ typedef typename StaticIf<
+ (N <= 32) && IsPowerOf2<N>::value,
+ WarpOptimized<N>,
+ typename StaticIf<
+ (N <= 1024) && IsPowerOf2<N>::value,
+ GenericOptimized32<N>,
+ Generic<N>
+ >::type
+ >::type reductor;
+ };
+ }
+}}}
+
+#endif // __OPENCV_GPU_REDUCE_DETAIL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp
new file mode 100644
index 00000000..a84e0c2f
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/reduce_key_val.hpp
@@ -0,0 +1,498 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__
+#define __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__
+
+#include <thrust/tuple.h>
+#include "../warp.hpp"
+#include "../warp_shuffle.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ namespace reduce_key_val_detail
+ {
+ template <typename T> struct GetType;
+ template <typename T> struct GetType<T*>
+ {
+ typedef T type;
+ };
+ template <typename T> struct GetType<volatile T*>
+ {
+ typedef T type;
+ };
+ template <typename T> struct GetType<T&>
+ {
+ typedef T type;
+ };
+
+ template <unsigned int I, unsigned int N>
+ struct For
+ {
+ template <class PointerTuple, class ReferenceTuple>
+ static __device__ void loadToSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)
+ {
+ thrust::get<I>(smem)[tid] = thrust::get<I>(data);
+
+ For<I + 1, N>::loadToSmem(smem, data, tid);
+ }
+ template <class PointerTuple, class ReferenceTuple>
+ static __device__ void loadFromSmem(const PointerTuple& smem, const ReferenceTuple& data, unsigned int tid)
+ {
+ thrust::get<I>(data) = thrust::get<I>(smem)[tid];
+
+ For<I + 1, N>::loadFromSmem(smem, data, tid);
+ }
+
+ template <class ReferenceTuple>
+ static __device__ void copyShfl(const ReferenceTuple& val, unsigned int delta, int width)
+ {
+ thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);
+
+ For<I + 1, N>::copyShfl(val, delta, width);
+ }
+ template <class PointerTuple, class ReferenceTuple>
+ static __device__ void copy(const PointerTuple& svals, const ReferenceTuple& val, unsigned int tid, unsigned int delta)
+ {
+ thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];
+
+ For<I + 1, N>::copy(svals, val, tid, delta);
+ }
+
+ template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>
+ static __device__ void mergeShfl(const KeyReferenceTuple& key, const ValReferenceTuple& val, const CmpTuple& cmp, unsigned int delta, int width)
+ {
+ typename GetType<typename thrust::tuple_element<I, KeyReferenceTuple>::type>::type reg = shfl_down(thrust::get<I>(key), delta, width);
+
+ if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))
+ {
+ thrust::get<I>(key) = reg;
+ thrust::get<I>(val) = shfl_down(thrust::get<I>(val), delta, width);
+ }
+
+ For<I + 1, N>::mergeShfl(key, val, cmp, delta, width);
+ }
+ template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>
+ static __device__ void merge(const KeyPointerTuple& skeys, const KeyReferenceTuple& key,
+ const ValPointerTuple& svals, const ValReferenceTuple& val,
+ const CmpTuple& cmp,
+ unsigned int tid, unsigned int delta)
+ {
+ typename GetType<typename thrust::tuple_element<I, KeyPointerTuple>::type>::type reg = thrust::get<I>(skeys)[tid + delta];
+
+ if (thrust::get<I>(cmp)(reg, thrust::get<I>(key)))
+ {
+ thrust::get<I>(skeys)[tid] = thrust::get<I>(key) = reg;
+ thrust::get<I>(svals)[tid] = thrust::get<I>(val) = thrust::get<I>(svals)[tid + delta];
+ }
+
+ For<I + 1, N>::merge(skeys, key, svals, val, cmp, tid, delta);
+ }
+ };
+ template <unsigned int N>
+ struct For<N, N>
+ {
+ template <class PointerTuple, class ReferenceTuple>
+ static __device__ void loadToSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)
+ {
+ }
+ template <class PointerTuple, class ReferenceTuple>
+ static __device__ void loadFromSmem(const PointerTuple&, const ReferenceTuple&, unsigned int)
+ {
+ }
+
+ template <class ReferenceTuple>
+ static __device__ void copyShfl(const ReferenceTuple&, unsigned int, int)
+ {
+ }
+ template <class PointerTuple, class ReferenceTuple>
+ static __device__ void copy(const PointerTuple&, const ReferenceTuple&, unsigned int, unsigned int)
+ {
+ }
+
+ template <class KeyReferenceTuple, class ValReferenceTuple, class CmpTuple>
+ static __device__ void mergeShfl(const KeyReferenceTuple&, const ValReferenceTuple&, const CmpTuple&, unsigned int, int)
+ {
+ }
+ template <class KeyPointerTuple, class KeyReferenceTuple, class ValPointerTuple, class ValReferenceTuple, class CmpTuple>
+ static __device__ void merge(const KeyPointerTuple&, const KeyReferenceTuple&,
+ const ValPointerTuple&, const ValReferenceTuple&,
+ const CmpTuple&,
+ unsigned int, unsigned int)
+ {
+ }
+ };
+
+ //////////////////////////////////////////////////////
+ // loadToSmem
+
+ template <typename T>
+ __device__ __forceinline__ void loadToSmem(volatile T* smem, T& data, unsigned int tid)
+ {
+ smem[tid] = data;
+ }
+ template <typename T>
+ __device__ __forceinline__ void loadFromSmem(volatile T* smem, T& data, unsigned int tid)
+ {
+ data = smem[tid];
+ }
+ template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
+ __device__ __forceinline__ void loadToSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,
+ unsigned int tid)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadToSmem(smem, data, tid);
+ }
+ template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
+ __device__ __forceinline__ void loadFromSmem(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& smem,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& data,
+ unsigned int tid)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::loadFromSmem(smem, data, tid);
+ }
+
+ //////////////////////////////////////////////////////
+ // copyVals
+
+ template <typename V>
+ __device__ __forceinline__ void copyValsShfl(V& val, unsigned int delta, int width)
+ {
+ val = shfl_down(val, delta, width);
+ }
+ template <typename V>
+ __device__ __forceinline__ void copyVals(volatile V* svals, V& val, unsigned int tid, unsigned int delta)
+ {
+ svals[tid] = val = svals[tid + delta];
+ }
+ template <typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
+ __device__ __forceinline__ void copyValsShfl(const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ unsigned int delta,
+ int width)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9> >::value>::copyShfl(val, delta, width);
+ }
+ template <typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9>
+ __device__ __forceinline__ void copyVals(const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ unsigned int tid, unsigned int delta)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::copy(svals, val, tid, delta);
+ }
+
+ //////////////////////////////////////////////////////
+ // merge
+
+ template <typename K, typename V, class Cmp>
+ __device__ __forceinline__ void mergeShfl(K& key, V& val, const Cmp& cmp, unsigned int delta, int width)
+ {
+ K reg = shfl_down(key, delta, width);
+
+ if (cmp(reg, key))
+ {
+ key = reg;
+ copyValsShfl(val, delta, width);
+ }
+ }
+ template <typename K, typename V, class Cmp>
+ __device__ __forceinline__ void merge(volatile K* skeys, K& key, volatile V* svals, V& val, const Cmp& cmp, unsigned int tid, unsigned int delta)
+ {
+ K reg = skeys[tid + delta];
+
+ if (cmp(reg, key))
+ {
+ skeys[tid] = key = reg;
+ copyVals(svals, val, tid, delta);
+ }
+ }
+ template <typename K,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
+ class Cmp>
+ __device__ __forceinline__ void mergeShfl(K& key,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ const Cmp& cmp,
+ unsigned int delta, int width)
+ {
+ K reg = shfl_down(key, delta, width);
+
+ if (cmp(reg, key))
+ {
+ key = reg;
+ copyValsShfl(val, delta, width);
+ }
+ }
+ template <typename K,
+ typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
+ class Cmp>
+ __device__ __forceinline__ void merge(volatile K* skeys, K& key,
+ const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ const Cmp& cmp, unsigned int tid, unsigned int delta)
+ {
+ K reg = skeys[tid + delta];
+
+ if (cmp(reg, key))
+ {
+ skeys[tid] = key = reg;
+ copyVals(svals, val, tid, delta);
+ }
+ }
+ template <typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
+ class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
+ __device__ __forceinline__ void mergeShfl(const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,
+ unsigned int delta, int width)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9> >::value>::mergeShfl(key, val, cmp, delta, width);
+ }
+ template <typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,
+ typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
+ typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
+ class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
+ __device__ __forceinline__ void merge(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,
+ const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
+ const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp,
+ unsigned int tid, unsigned int delta)
+ {
+ For<0, thrust::tuple_size<thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9> >::value>::merge(skeys, key, svals, val, cmp, tid, delta);
+ }
+
+ //////////////////////////////////////////////////////
+ // Generic
+
+ template <unsigned int N> struct Generic
+ {
+ template <class KP, class KR, class VP, class VR, class Cmp>
+ static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
+ {
+ loadToSmem(skeys, key, tid);
+ loadValsToSmem(svals, val, tid);
+ if (N >= 32)
+ __syncthreads();
+
+ if (N >= 2048)
+ {
+ if (tid < 1024)
+ merge(skeys, key, svals, val, cmp, tid, 1024);
+
+ __syncthreads();
+ }
+ if (N >= 1024)
+ {
+ if (tid < 512)
+ merge(skeys, key, svals, val, cmp, tid, 512);
+
+ __syncthreads();
+ }
+ if (N >= 512)
+ {
+ if (tid < 256)
+ merge(skeys, key, svals, val, cmp, tid, 256);
+
+ __syncthreads();
+ }
+ if (N >= 256)
+ {
+ if (tid < 128)
+ merge(skeys, key, svals, val, cmp, tid, 128);
+
+ __syncthreads();
+ }
+ if (N >= 128)
+ {
+ if (tid < 64)
+ merge(skeys, key, svals, val, cmp, tid, 64);
+
+ __syncthreads();
+ }
+ if (N >= 64)
+ {
+ if (tid < 32)
+ merge(skeys, key, svals, val, cmp, tid, 32);
+ }
+
+ if (tid < 16)
+ {
+ merge(skeys, key, svals, val, cmp, tid, 16);
+ merge(skeys, key, svals, val, cmp, tid, 8);
+ merge(skeys, key, svals, val, cmp, tid, 4);
+ merge(skeys, key, svals, val, cmp, tid, 2);
+ merge(skeys, key, svals, val, cmp, tid, 1);
+ }
+ }
+ };
+
+ template <unsigned int I, class KP, class KR, class VP, class VR, class Cmp>
+ struct Unroll
+ {
+ static __device__ void loopShfl(KR key, VR val, Cmp cmp, unsigned int N)
+ {
+ mergeShfl(key, val, cmp, I, N);
+ Unroll<I / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
+ }
+ static __device__ void loop(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
+ {
+ merge(skeys, key, svals, val, cmp, tid, I);
+ Unroll<I / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
+ }
+ };
+ template <class KP, class KR, class VP, class VR, class Cmp>
+ struct Unroll<0, KP, KR, VP, VR, Cmp>
+ {
+ static __device__ void loopShfl(KR, VR, Cmp, unsigned int)
+ {
+ }
+ static __device__ void loop(KP, KR, VP, VR, unsigned int, Cmp)
+ {
+ }
+ };
+
+ template <unsigned int N> struct WarpOptimized
+ {
+ template <class KP, class KR, class VP, class VR, class Cmp>
+ static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
+ {
+ #if 0 // __CUDA_ARCH__ >= 300
+ (void) skeys;
+ (void) svals;
+ (void) tid;
+
+ Unroll<N / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, N);
+ #else
+ loadToSmem(skeys, key, tid);
+ loadToSmem(svals, val, tid);
+
+ if (tid < N / 2)
+ Unroll<N / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
+ #endif
+ }
+ };
+
+ template <unsigned int N> struct GenericOptimized32
+ {
+ enum { M = N / 32 };
+
+ template <class KP, class KR, class VP, class VR, class Cmp>
+ static __device__ void reduce(KP skeys, KR key, VP svals, VR val, unsigned int tid, Cmp cmp)
+ {
+ const unsigned int laneId = Warp::laneId();
+
+ #if 0 // __CUDA_ARCH__ >= 300
+ Unroll<16, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, warpSize);
+
+ if (laneId == 0)
+ {
+ loadToSmem(skeys, key, tid / 32);
+ loadToSmem(svals, val, tid / 32);
+ }
+ #else
+ loadToSmem(skeys, key, tid);
+ loadToSmem(svals, val, tid);
+
+ if (laneId < 16)
+ Unroll<16, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
+
+ __syncthreads();
+
+ if (laneId == 0)
+ {
+ loadToSmem(skeys, key, tid / 32);
+ loadToSmem(svals, val, tid / 32);
+ }
+ #endif
+
+ __syncthreads();
+
+ loadFromSmem(skeys, key, tid);
+
+ if (tid < 32)
+ {
+ #if 0 // __CUDA_ARCH__ >= 300
+ loadFromSmem(svals, val, tid);
+
+ Unroll<M / 2, KP, KR, VP, VR, Cmp>::loopShfl(key, val, cmp, M);
+ #else
+ Unroll<M / 2, KP, KR, VP, VR, Cmp>::loop(skeys, key, svals, val, tid, cmp);
+ #endif
+ }
+ }
+ };
+
+ template <bool val, class T1, class T2> struct StaticIf;
+ template <class T1, class T2> struct StaticIf<true, T1, T2>
+ {
+ typedef T1 type;
+ };
+ template <class T1, class T2> struct StaticIf<false, T1, T2>
+ {
+ typedef T2 type;
+ };
+
+ template <unsigned int N> struct IsPowerOf2
+ {
+ enum { value = ((N != 0) && !(N & (N - 1))) };
+ };
+
+ template <unsigned int N> struct Dispatcher
+ {
+ typedef typename StaticIf<
+ (N <= 32) && IsPowerOf2<N>::value,
+ WarpOptimized<N>,
+ typename StaticIf<
+ (N <= 1024) && IsPowerOf2<N>::value,
+ GenericOptimized32<N>,
+ Generic<N>
+ >::type
+ >::type reductor;
+ };
+ }
+}}}
+
+#endif // __OPENCV_GPU_PRED_VAL_REDUCE_DETAIL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp
new file mode 100644
index 00000000..10da5938
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/transform_detail.hpp
@@ -0,0 +1,395 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_TRANSFORM_DETAIL_HPP__
+#define __OPENCV_GPU_TRANSFORM_DETAIL_HPP__
+
+#include "../common.hpp"
+#include "../vec_traits.hpp"
+#include "../functional.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ namespace transform_detail
+ {
+ //! Read Write Traits
+
+ template <typename T, typename D, int shift> struct UnaryReadWriteTraits
+ {
+ typedef typename TypeVec<T, shift>::vec_type read_type;
+ typedef typename TypeVec<D, shift>::vec_type write_type;
+ };
+
+ template <typename T1, typename T2, typename D, int shift> struct BinaryReadWriteTraits
+ {
+ typedef typename TypeVec<T1, shift>::vec_type read_type1;
+ typedef typename TypeVec<T2, shift>::vec_type read_type2;
+ typedef typename TypeVec<D, shift>::vec_type write_type;
+ };
+
+ //! Transform kernels
+
+ template <int shift> struct OpUnroller;
+ template <> struct OpUnroller<1>
+ {
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src.x);
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src1.x, src2.x);
+ }
+ };
+ template <> struct OpUnroller<2>
+ {
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, UnOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src.x);
+ if (mask(y, x_shifted + 1))
+ dst.y = op(src.y);
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, BinOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src1.x, src2.x);
+ if (mask(y, x_shifted + 1))
+ dst.y = op(src1.y, src2.y);
+ }
+ };
+ template <> struct OpUnroller<3>
+ {
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src.x);
+ if (mask(y, x_shifted + 1))
+ dst.y = op(src.y);
+ if (mask(y, x_shifted + 2))
+ dst.z = op(src.z);
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src1.x, src2.x);
+ if (mask(y, x_shifted + 1))
+ dst.y = op(src1.y, src2.y);
+ if (mask(y, x_shifted + 2))
+ dst.z = op(src1.z, src2.z);
+ }
+ };
+ template <> struct OpUnroller<4>
+ {
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src.x);
+ if (mask(y, x_shifted + 1))
+ dst.y = op(src.y);
+ if (mask(y, x_shifted + 2))
+ dst.z = op(src.z);
+ if (mask(y, x_shifted + 3))
+ dst.w = op(src.w);
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.x = op(src1.x, src2.x);
+ if (mask(y, x_shifted + 1))
+ dst.y = op(src1.y, src2.y);
+ if (mask(y, x_shifted + 2))
+ dst.z = op(src1.z, src2.z);
+ if (mask(y, x_shifted + 3))
+ dst.w = op(src1.w, src2.w);
+ }
+ };
+ template <> struct OpUnroller<8>
+ {
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T& src, D& dst, const Mask& mask, const UnOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.a0 = op(src.a0);
+ if (mask(y, x_shifted + 1))
+ dst.a1 = op(src.a1);
+ if (mask(y, x_shifted + 2))
+ dst.a2 = op(src.a2);
+ if (mask(y, x_shifted + 3))
+ dst.a3 = op(src.a3);
+ if (mask(y, x_shifted + 4))
+ dst.a4 = op(src.a4);
+ if (mask(y, x_shifted + 5))
+ dst.a5 = op(src.a5);
+ if (mask(y, x_shifted + 6))
+ dst.a6 = op(src.a6);
+ if (mask(y, x_shifted + 7))
+ dst.a7 = op(src.a7);
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static __device__ __forceinline__ void unroll(const T1& src1, const T2& src2, D& dst, const Mask& mask, const BinOp& op, int x_shifted, int y)
+ {
+ if (mask(y, x_shifted))
+ dst.a0 = op(src1.a0, src2.a0);
+ if (mask(y, x_shifted + 1))
+ dst.a1 = op(src1.a1, src2.a1);
+ if (mask(y, x_shifted + 2))
+ dst.a2 = op(src1.a2, src2.a2);
+ if (mask(y, x_shifted + 3))
+ dst.a3 = op(src1.a3, src2.a3);
+ if (mask(y, x_shifted + 4))
+ dst.a4 = op(src1.a4, src2.a4);
+ if (mask(y, x_shifted + 5))
+ dst.a5 = op(src1.a5, src2.a5);
+ if (mask(y, x_shifted + 6))
+ dst.a6 = op(src1.a6, src2.a6);
+ if (mask(y, x_shifted + 7))
+ dst.a7 = op(src1.a7, src2.a7);
+ }
+ };
+
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)
+ {
+ typedef TransformFunctorTraits<UnOp> ft;
+ typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;
+ typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::write_type write_type;
+
+ const int x = threadIdx.x + blockIdx.x * blockDim.x;
+ const int y = threadIdx.y + blockIdx.y * blockDim.y;
+ const int x_shifted = x * ft::smart_shift;
+
+ if (y < src_.rows)
+ {
+ const T* src = src_.ptr(y);
+ D* dst = dst_.ptr(y);
+
+ if (x_shifted + ft::smart_shift - 1 < src_.cols)
+ {
+ const read_type src_n_el = ((const read_type*)src)[x];
+ write_type dst_n_el = ((const write_type*)dst)[x];
+
+ OpUnroller<ft::smart_shift>::unroll(src_n_el, dst_n_el, mask, op, x_shifted, y);
+
+ ((write_type*)dst)[x] = dst_n_el;
+ }
+ else
+ {
+ for (int real_x = x_shifted; real_x < src_.cols; ++real_x)
+ {
+ if (mask(y, real_x))
+ dst[real_x] = op(src[real_x]);
+ }
+ }
+ }
+ }
+
+ template <typename T, typename D, typename UnOp, typename Mask>
+ __global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)
+ {
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;
+
+ if (x < src.cols && y < src.rows && mask(y, x))
+ {
+ dst.ptr(y)[x] = op(src.ptr(y)[x]);
+ }
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,
+ const Mask mask, const BinOp op)
+ {
+ typedef TransformFunctorTraits<BinOp> ft;
+ typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type1 read_type1;
+ typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::read_type2 read_type2;
+ typedef typename BinaryReadWriteTraits<T1, T2, D, ft::smart_shift>::write_type write_type;
+
+ const int x = threadIdx.x + blockIdx.x * blockDim.x;
+ const int y = threadIdx.y + blockIdx.y * blockDim.y;
+ const int x_shifted = x * ft::smart_shift;
+
+ if (y < src1_.rows)
+ {
+ const T1* src1 = src1_.ptr(y);
+ const T2* src2 = src2_.ptr(y);
+ D* dst = dst_.ptr(y);
+
+ if (x_shifted + ft::smart_shift - 1 < src1_.cols)
+ {
+ const read_type1 src1_n_el = ((const read_type1*)src1)[x];
+ const read_type2 src2_n_el = ((const read_type2*)src2)[x];
+ write_type dst_n_el = ((const write_type*)dst)[x];
+
+ OpUnroller<ft::smart_shift>::unroll(src1_n_el, src2_n_el, dst_n_el, mask, op, x_shifted, y);
+
+ ((write_type*)dst)[x] = dst_n_el;
+ }
+ else
+ {
+ for (int real_x = x_shifted; real_x < src1_.cols; ++real_x)
+ {
+ if (mask(y, real_x))
+ dst[real_x] = op(src1[real_x], src2[real_x]);
+ }
+ }
+ }
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,
+ const Mask mask, const BinOp op)
+ {
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;
+
+ if (x < src1.cols && y < src1.rows && mask(y, x))
+ {
+ const T1 src1_data = src1.ptr(y)[x];
+ const T2 src2_data = src2.ptr(y)[x];
+ dst.ptr(y)[x] = op(src1_data, src2_data);
+ }
+ }
+
+ template <bool UseSmart> struct TransformDispatcher;
+ template<> struct TransformDispatcher<false>
+ {
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)
+ {
+ typedef TransformFunctorTraits<UnOp> ft;
+
+ const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
+ const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1);
+
+ transformSimple<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
+ cudaSafeCall( cudaGetLastError() );
+
+ if (stream == 0)
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)
+ {
+ typedef TransformFunctorTraits<BinOp> ft;
+
+ const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);
+ const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1);
+
+ transformSimple<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
+ cudaSafeCall( cudaGetLastError() );
+
+ if (stream == 0)
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
+ };
+ template<> struct TransformDispatcher<true>
+ {
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static void call(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, Mask mask, cudaStream_t stream)
+ {
+ typedef TransformFunctorTraits<UnOp> ft;
+
+ StaticAssert<ft::smart_shift != 1>::check();
+
+ if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) ||
+ !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
+ {
+ TransformDispatcher<false>::call(src, dst, op, mask, stream);
+ return;
+ }
+
+ const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
+ const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1);
+
+ transformSmart<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);
+ cudaSafeCall( cudaGetLastError() );
+
+ if (stream == 0)
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static void call(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, Mask mask, cudaStream_t stream)
+ {
+ typedef TransformFunctorTraits<BinOp> ft;
+
+ StaticAssert<ft::smart_shift != 1>::check();
+
+ if (!isAligned(src1.data, ft::smart_shift * sizeof(T1)) || !isAligned(src1.step, ft::smart_shift * sizeof(T1)) ||
+ !isAligned(src2.data, ft::smart_shift * sizeof(T2)) || !isAligned(src2.step, ft::smart_shift * sizeof(T2)) ||
+ !isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))
+ {
+ TransformDispatcher<false>::call(src1, src2, dst, op, mask, stream);
+ return;
+ }
+
+ const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);
+ const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1);
+
+ transformSmart<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);
+ cudaSafeCall( cudaGetLastError() );
+
+ if (stream == 0)
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
+ };
+ } // namespace transform_detail
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_TRANSFORM_DETAIL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp
new file mode 100644
index 00000000..97ff00d8
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/type_traits_detail.hpp
@@ -0,0 +1,187 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__
+#define __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__
+
+#include "../common.hpp"
+#include "../vec_traits.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ namespace type_traits_detail
+ {
+ template <bool, typename T1, typename T2> struct Select { typedef T1 type; };
+ template <typename T1, typename T2> struct Select<false, T1, T2> { typedef T2 type; };
+
+ template <typename T> struct IsSignedIntergral { enum {value = 0}; };
+ template <> struct IsSignedIntergral<schar> { enum {value = 1}; };
+ template <> struct IsSignedIntergral<char1> { enum {value = 1}; };
+ template <> struct IsSignedIntergral<short> { enum {value = 1}; };
+ template <> struct IsSignedIntergral<short1> { enum {value = 1}; };
+ template <> struct IsSignedIntergral<int> { enum {value = 1}; };
+ template <> struct IsSignedIntergral<int1> { enum {value = 1}; };
+
+ template <typename T> struct IsUnsignedIntegral { enum {value = 0}; };
+ template <> struct IsUnsignedIntegral<uchar> { enum {value = 1}; };
+ template <> struct IsUnsignedIntegral<uchar1> { enum {value = 1}; };
+ template <> struct IsUnsignedIntegral<ushort> { enum {value = 1}; };
+ template <> struct IsUnsignedIntegral<ushort1> { enum {value = 1}; };
+ template <> struct IsUnsignedIntegral<uint> { enum {value = 1}; };
+ template <> struct IsUnsignedIntegral<uint1> { enum {value = 1}; };
+
+ template <typename T> struct IsIntegral { enum {value = IsSignedIntergral<T>::value || IsUnsignedIntegral<T>::value}; };
+ template <> struct IsIntegral<char> { enum {value = 1}; };
+ template <> struct IsIntegral<bool> { enum {value = 1}; };
+
+ template <typename T> struct IsFloat { enum {value = 0}; };
+ template <> struct IsFloat<float> { enum {value = 1}; };
+ template <> struct IsFloat<double> { enum {value = 1}; };
+
+ template <typename T> struct IsVec { enum {value = 0}; };
+ template <> struct IsVec<uchar1> { enum {value = 1}; };
+ template <> struct IsVec<uchar2> { enum {value = 1}; };
+ template <> struct IsVec<uchar3> { enum {value = 1}; };
+ template <> struct IsVec<uchar4> { enum {value = 1}; };
+ template <> struct IsVec<uchar8> { enum {value = 1}; };
+ template <> struct IsVec<char1> { enum {value = 1}; };
+ template <> struct IsVec<char2> { enum {value = 1}; };
+ template <> struct IsVec<char3> { enum {value = 1}; };
+ template <> struct IsVec<char4> { enum {value = 1}; };
+ template <> struct IsVec<char8> { enum {value = 1}; };
+ template <> struct IsVec<ushort1> { enum {value = 1}; };
+ template <> struct IsVec<ushort2> { enum {value = 1}; };
+ template <> struct IsVec<ushort3> { enum {value = 1}; };
+ template <> struct IsVec<ushort4> { enum {value = 1}; };
+ template <> struct IsVec<ushort8> { enum {value = 1}; };
+ template <> struct IsVec<short1> { enum {value = 1}; };
+ template <> struct IsVec<short2> { enum {value = 1}; };
+ template <> struct IsVec<short3> { enum {value = 1}; };
+ template <> struct IsVec<short4> { enum {value = 1}; };
+ template <> struct IsVec<short8> { enum {value = 1}; };
+ template <> struct IsVec<uint1> { enum {value = 1}; };
+ template <> struct IsVec<uint2> { enum {value = 1}; };
+ template <> struct IsVec<uint3> { enum {value = 1}; };
+ template <> struct IsVec<uint4> { enum {value = 1}; };
+ template <> struct IsVec<uint8> { enum {value = 1}; };
+ template <> struct IsVec<int1> { enum {value = 1}; };
+ template <> struct IsVec<int2> { enum {value = 1}; };
+ template <> struct IsVec<int3> { enum {value = 1}; };
+ template <> struct IsVec<int4> { enum {value = 1}; };
+ template <> struct IsVec<int8> { enum {value = 1}; };
+ template <> struct IsVec<float1> { enum {value = 1}; };
+ template <> struct IsVec<float2> { enum {value = 1}; };
+ template <> struct IsVec<float3> { enum {value = 1}; };
+ template <> struct IsVec<float4> { enum {value = 1}; };
+ template <> struct IsVec<float8> { enum {value = 1}; };
+ template <> struct IsVec<double1> { enum {value = 1}; };
+ template <> struct IsVec<double2> { enum {value = 1}; };
+ template <> struct IsVec<double3> { enum {value = 1}; };
+ template <> struct IsVec<double4> { enum {value = 1}; };
+ template <> struct IsVec<double8> { enum {value = 1}; };
+
+ template <class U> struct AddParameterType { typedef const U& type; };
+ template <class U> struct AddParameterType<U&> { typedef U& type; };
+ template <> struct AddParameterType<void> { typedef void type; };
+
+ template <class U> struct ReferenceTraits
+ {
+ enum { value = false };
+ typedef U type;
+ };
+ template <class U> struct ReferenceTraits<U&>
+ {
+ enum { value = true };
+ typedef U type;
+ };
+
+ template <class U> struct PointerTraits
+ {
+ enum { value = false };
+ typedef void type;
+ };
+ template <class U> struct PointerTraits<U*>
+ {
+ enum { value = true };
+ typedef U type;
+ };
+ template <class U> struct PointerTraits<U*&>
+ {
+ enum { value = true };
+ typedef U type;
+ };
+
+ template <class U> struct UnConst
+ {
+ typedef U type;
+ enum { value = 0 };
+ };
+ template <class U> struct UnConst<const U>
+ {
+ typedef U type;
+ enum { value = 1 };
+ };
+ template <class U> struct UnConst<const U&>
+ {
+ typedef U& type;
+ enum { value = 1 };
+ };
+
+ template <class U> struct UnVolatile
+ {
+ typedef U type;
+ enum { value = 0 };
+ };
+ template <class U> struct UnVolatile<volatile U>
+ {
+ typedef U type;
+ enum { value = 1 };
+ };
+ template <class U> struct UnVolatile<volatile U&>
+ {
+ typedef U& type;
+ enum { value = 1 };
+ };
+ } // namespace type_traits_detail
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_TYPE_TRAITS_DETAIL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp
new file mode 100644
index 00000000..78ab5565
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/detail/vec_distance_detail.hpp
@@ -0,0 +1,117 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__
+#define __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__
+
+#include "../datamov_utils.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ namespace vec_distance_detail
+ {
+ template <int THREAD_DIM, int N> struct UnrollVecDiffCached
+ {
+ template <typename Dist, typename T1, typename T2>
+ static __device__ void calcCheck(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int ind)
+ {
+ if (ind < len)
+ {
+ T1 val1 = *vecCached++;
+
+ T2 val2;
+ ForceGlob<T2>::Load(vecGlob, ind, val2);
+
+ dist.reduceIter(val1, val2);
+
+ UnrollVecDiffCached<THREAD_DIM, N - 1>::calcCheck(vecCached, vecGlob, len, dist, ind + THREAD_DIM);
+ }
+ }
+
+ template <typename Dist, typename T1, typename T2>
+ static __device__ void calcWithoutCheck(const T1* vecCached, const T2* vecGlob, Dist& dist)
+ {
+ T1 val1 = *vecCached++;
+
+ T2 val2;
+ ForceGlob<T2>::Load(vecGlob, 0, val2);
+ vecGlob += THREAD_DIM;
+
+ dist.reduceIter(val1, val2);
+
+ UnrollVecDiffCached<THREAD_DIM, N - 1>::calcWithoutCheck(vecCached, vecGlob, dist);
+ }
+ };
+ template <int THREAD_DIM> struct UnrollVecDiffCached<THREAD_DIM, 0>
+ {
+ template <typename Dist, typename T1, typename T2>
+ static __device__ __forceinline__ void calcCheck(const T1*, const T2*, int, Dist&, int)
+ {
+ }
+
+ template <typename Dist, typename T1, typename T2>
+ static __device__ __forceinline__ void calcWithoutCheck(const T1*, const T2*, Dist&)
+ {
+ }
+ };
+
+ template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN> struct VecDiffCachedCalculator;
+ template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, false>
+ {
+ template <typename Dist, typename T1, typename T2>
+ static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)
+ {
+ UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcCheck(vecCached, vecGlob, len, dist, tid);
+ }
+ };
+ template <int THREAD_DIM, int MAX_LEN> struct VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, true>
+ {
+ template <typename Dist, typename T1, typename T2>
+ static __device__ __forceinline__ void calc(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, int tid)
+ {
+ UnrollVecDiffCached<THREAD_DIM, MAX_LEN / THREAD_DIM>::calcWithoutCheck(vecCached, vecGlob + tid, dist);
+ }
+ };
+ } // namespace vec_distance_detail
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_VEC_DISTANCE_DETAIL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp
new file mode 100644
index 00000000..cf431d95
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/dynamic_smem.hpp
@@ -0,0 +1,80 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_DYNAMIC_SMEM_HPP__
+#define __OPENCV_GPU_DYNAMIC_SMEM_HPP__
+
+namespace cv { namespace gpu { namespace device
+{
+ template<class T> struct DynamicSharedMem
+ {
+ __device__ __forceinline__ operator T*()
+ {
+ extern __shared__ int __smem[];
+ return (T*)__smem;
+ }
+
+ __device__ __forceinline__ operator const T*() const
+ {
+ extern __shared__ int __smem[];
+ return (T*)__smem;
+ }
+ };
+
+ // specialize for double to avoid unaligned memory access compile errors
+ template<> struct DynamicSharedMem<double>
+ {
+ __device__ __forceinline__ operator double*()
+ {
+ extern __shared__ double __smem_d[];
+ return (double*)__smem_d;
+ }
+
+ __device__ __forceinline__ operator const double*() const
+ {
+ extern __shared__ double __smem_d[];
+ return (double*)__smem_d;
+ }
+ };
+}}}
+
+#endif // __OPENCV_GPU_DYNAMIC_SMEM_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp
new file mode 100644
index 00000000..bf47bc5f
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/emulation.hpp
@@ -0,0 +1,138 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_GPU_EMULATION_HPP_
+#define OPENCV_GPU_EMULATION_HPP_
+
+#include "warp_reduce.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ // Software fallbacks for device intrinsics (__syncthreads_or, __ballot)
+ // and shared-memory atomics on GPU architectures that lack native support.
+ struct Emulation
+ {
+
+ static __device__ __forceinline__ int syncthreadsOr(int pred)
+ {
+#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
+ // compilation stub only: __syncthreads_or requires sm_20 or newer
+ return 0;
+#else
+ return __syncthreads_or(pred);
+#endif
+ }
+
+ template<int CTA_SIZE>
+ static __forceinline__ __device__ int Ballot(int predicate)
+ {
+#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
+ return __ballot(predicate);
+#else
+ // pre-sm_20 fallback: each thread publishes its predicate bit in
+ // shared memory, then the warp's bits are combined by warp_reduce
+ // (see warp_reduce.hpp).
+ __shared__ volatile int cta_buffer[CTA_SIZE];
+
+ int tid = threadIdx.x;
+ cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
+ return warp_reduce(cta_buffer);
+#endif
+ }
+
+ // Emulated shared-memory atomics for devices below sm_12.
+ // A 5-bit thread tag is packed into the top bits of the word; each
+ // thread rewrites the word until its own tagged value sticks, which
+ // serializes conflicting updates within a warp.
+ // NOTE(review): relies on warp-synchronous execution of the retry
+ // loop — confirm for the architectures this path targets.
+ struct smem
+ {
+ enum { TAG_MASK = (1U << ( (sizeof(unsigned int) << 3) - 5U)) - 1U };
+
+ template<typename T>
+ static __device__ __forceinline__ T atomicInc(T* address, T val)
+ {
+#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
+ T count;
+ unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
+ do
+ {
+ count = *address & TAG_MASK;
+ count = tag | (count + 1);
+ *address = count;
+ } while (*address != count);
+
+ return (count & TAG_MASK) - 1;
+#else
+ return ::atomicInc(address, val);
+#endif
+ }
+
+ template<typename T>
+ static __device__ __forceinline__ T atomicAdd(T* address, T val)
+ {
+#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
+ T count;
+ unsigned int tag = threadIdx.x << ( (sizeof(unsigned int) << 3) - 5U);
+ do
+ {
+ count = *address & TAG_MASK;
+ count = tag | (count + val);
+ *address = count;
+ } while (*address != count);
+
+ return (count & TAG_MASK) - val;
+#else
+ return ::atomicAdd(address, val);
+#endif
+ }
+
+ template<typename T>
+ static __device__ __forceinline__ T atomicMin(T* address, T val)
+ {
+#if defined (__CUDA_ARCH__) && (__CUDA_ARCH__ < 120)
+ // min is idempotent, so no tag is needed: keep storing the
+ // smaller value until the stored word stops decreasing.
+ T count = ::min(*address, val);
+ do
+ {
+ *address = count;
+ } while (*address > count);
+
+ return count;
+#else
+ return ::atomicMin(address, val);
+#endif
+ }
+ };
+ };
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif /* OPENCV_GPU_EMULATION_HPP_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp
new file mode 100644
index 00000000..d193969a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/filters.hpp
@@ -0,0 +1,278 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_FILTERS_HPP__
+#define __OPENCV_GPU_FILTERS_HPP__
+
+#include "saturate_cast.hpp"
+#include "vec_traits.hpp"
+#include "vec_math.hpp"
+#include "type_traits.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ // Nearest-neighbour sampler: coordinates are truncated toward zero
+ // (__float2int_rz) and the source texel is returned unchanged.
+ template <typename Ptr2D> struct PointFilter
+ {
+ typedef typename Ptr2D::elem_type elem_type;
+ typedef float index_type;
+
+ // fx/fy are unused; the parameters exist so all filter functors
+ // share the same constructor signature.
+ explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
+ : src(src_)
+ {
+ (void)fx;
+ (void)fy;
+ }
+
+ __device__ __forceinline__ elem_type operator ()(float y, float x) const
+ {
+ return src(__float2int_rz(y), __float2int_rz(x));
+ }
+
+ const Ptr2D src;
+ };
+
+ // Bilinear sampler: blends the four texels surrounding (x, y),
+ // weighting each by the opposite fractional distance, then saturates
+ // back to the element type.
+ template <typename Ptr2D> struct LinearFilter
+ {
+ typedef typename Ptr2D::elem_type elem_type;
+ typedef float index_type;
+
+ // fx/fy are unused; kept for a uniform filter-constructor signature.
+ explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
+ : src(src_)
+ {
+ (void)fx;
+ (void)fy;
+ }
+ __device__ __forceinline__ elem_type operator ()(float y, float x) const
+ {
+ // accumulate in float (per channel) to avoid integer round-off
+ typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
+
+ work_type out = VecTraits<work_type>::all(0);
+
+ const int x1 = __float2int_rd(x);
+ const int y1 = __float2int_rd(y);
+ const int x2 = x1 + 1;
+ const int y2 = y1 + 1;
+
+ elem_type src_reg = src(y1, x1);
+ out = out + src_reg * ((x2 - x) * (y2 - y));
+
+ src_reg = src(y1, x2);
+ out = out + src_reg * ((x - x1) * (y2 - y));
+
+ src_reg = src(y2, x1);
+ out = out + src_reg * ((x2 - x) * (y - y1));
+
+ src_reg = src(y2, x2);
+ out = out + src_reg * ((x - x1) * (y - y1));
+
+ return saturate_cast<elem_type>(out);
+ }
+
+ const Ptr2D src;
+ };
+
+ // Bicubic sampler over the 4x4 neighbourhood of (x, y) using the Keys
+ // cubic kernel with a = -0.5 (the piecewise polynomial in bicubicCoeff).
+ template <typename Ptr2D> struct CubicFilter
+ {
+ typedef typename Ptr2D::elem_type elem_type;
+ typedef float index_type;
+ typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
+
+ // fx/fy are unused; kept for a uniform filter-constructor signature.
+ explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)
+ : src(src_)
+ {
+ (void)fx;
+ (void)fy;
+ }
+
+ // Cubic convolution weight for a sample at distance x_ from the
+ // target; non-zero only for |x_| < 2.
+ static __device__ __forceinline__ float bicubicCoeff(float x_)
+ {
+ float x = fabsf(x_);
+ if (x <= 1.0f)
+ {
+ return x * x * (1.5f * x - 2.5f) + 1.0f;
+ }
+ else if (x < 2.0f)
+ {
+ return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
+ }
+ else
+ {
+ return 0.0f;
+ }
+ }
+
+ __device__ elem_type operator ()(float y, float x) const
+ {
+ const float xmin = ::ceilf(x - 2.0f);
+ const float xmax = ::floorf(x + 2.0f);
+
+ const float ymin = ::ceilf(y - 2.0f);
+ const float ymax = ::floorf(y + 2.0f);
+
+ work_type sum = VecTraits<work_type>::all(0);
+ float wsum = 0.0f;
+
+ for (float cy = ymin; cy <= ymax; cy += 1.0f)
+ {
+ for (float cx = xmin; cx <= xmax; cx += 1.0f)
+ {
+ const float w = bicubicCoeff(x - cx) * bicubicCoeff(y - cy);
+ sum = sum + w * src(__float2int_rd(cy), __float2int_rd(cx));
+ wsum += w;
+ }
+ }
+
+ // guard against a zero weight sum (all samples outside support)
+ work_type res = (!wsum)? VecTraits<work_type>::all(0) : sum / wsum;
+
+ return saturate_cast<elem_type>(res);
+ }
+
+ const Ptr2D src;
+ };
+ // for integer scaling
+ // Area (box) downsampling for integer scale factors: averages the
+ // scale_x x scale_y source window that maps onto destination pixel
+ // (x, y). `scale` is the precomputed reciprocal of the window area.
+ template <typename Ptr2D> struct IntegerAreaFilter
+ {
+ typedef typename Ptr2D::elem_type elem_type;
+ typedef float index_type;
+
+ explicit __host__ __device__ __forceinline__ IntegerAreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)
+ : src(src_), scale_x(scale_x_), scale_y(scale_y_), scale(1.f / (scale_x * scale_y)) {}
+
+ __device__ __forceinline__ elem_type operator ()(float y, float x) const
+ {
+ // source-window bounds covered by this destination pixel
+ float fsx1 = x * scale_x;
+ float fsx2 = fsx1 + scale_x;
+
+ int sx1 = __float2int_ru(fsx1);
+ int sx2 = __float2int_rd(fsx2);
+
+ float fsy1 = y * scale_y;
+ float fsy2 = fsy1 + scale_y;
+
+ int sy1 = __float2int_ru(fsy1);
+ int sy2 = __float2int_rd(fsy2);
+
+ typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
+ work_type out = VecTraits<work_type>::all(0.f);
+
+ for(int dy = sy1; dy < sy2; ++dy)
+ for(int dx = sx1; dx < sx2; ++dx)
+ {
+ out = out + src(dy, dx) * scale;
+ }
+
+ return saturate_cast<elem_type>(out);
+ }
+
+ const Ptr2D src;
+ float scale_x, scale_y ,scale;
+ };
+
+ // Area downsampling for fractional scale factors: interior texels of
+ // the source window contribute fully, while the partially covered
+ // border rows/columns are weighted by their fractional coverage.
+ template <typename Ptr2D> struct AreaFilter
+ {
+ typedef typename Ptr2D::elem_type elem_type;
+ typedef float index_type;
+
+ explicit __host__ __device__ __forceinline__ AreaFilter(const Ptr2D& src_, float scale_x_, float scale_y_)
+ : src(src_), scale_x(scale_x_), scale_y(scale_y_){}
+
+ __device__ __forceinline__ elem_type operator ()(float y, float x) const
+ {
+ // fractional source-window bounds for destination pixel (x, y)
+ float fsx1 = x * scale_x;
+ float fsx2 = fsx1 + scale_x;
+
+ // integer sub-window fully inside the source window
+ int sx1 = __float2int_ru(fsx1);
+ int sx2 = __float2int_rd(fsx2);
+
+ float fsy1 = y * scale_y;
+ float fsy2 = fsy1 + scale_y;
+
+ int sy1 = __float2int_ru(fsy1);
+ int sy2 = __float2int_rd(fsy2);
+
+ // normalization uses the window area clipped to the image extent
+ float scale = 1.f / (fminf(scale_x, src.width - fsx1) * fminf(scale_y, src.height - fsy1));
+
+ typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;
+ work_type out = VecTraits<work_type>::all(0.f);
+
+ for (int dy = sy1; dy < sy2; ++dy)
+ {
+ // fully covered interior columns of this row
+ for (int dx = sx1; dx < sx2; ++dx)
+ out = out + src(dy, dx) * scale;
+
+ // partially covered left/right columns
+ if (sx1 > fsx1)
+ out = out + src(dy, (sx1 -1) ) * ((sx1 - fsx1) * scale);
+
+ if (sx2 < fsx2)
+ out = out + src(dy, sx2) * ((fsx2 -sx2) * scale);
+ }
+
+ // partially covered top/bottom rows
+ if (sy1 > fsy1)
+ for (int dx = sx1; dx < sx2; ++dx)
+ out = out + src( (sy1 - 1) , dx) * ((sy1 -fsy1) * scale);
+
+ if (sy2 < fsy2)
+ for (int dx = sx1; dx < sx2; ++dx)
+ out = out + src(sy2, dx) * ((fsy2 -sy2) * scale);
+
+ // the four partially covered corner texels
+ if ((sy1 > fsy1) && (sx1 > fsx1))
+ out = out + src( (sy1 - 1) , (sx1 - 1)) * ((sy1 -fsy1) * (sx1 -fsx1) * scale);
+
+ if ((sy1 > fsy1) && (sx2 < fsx2))
+ out = out + src( (sy1 - 1) , sx2) * ((sy1 -fsy1) * (fsx2 -sx2) * scale);
+
+ if ((sy2 < fsy2) && (sx2 < fsx2))
+ out = out + src(sy2, sx2) * ((fsy2 -sy2) * (fsx2 -sx2) * scale);
+
+ if ((sy2 < fsy2) && (sx1 > fsx1))
+ out = out + src(sy2, (sx1 - 1)) * ((fsy2 -sy2) * (sx1 -fsx1) * scale);
+
+ return saturate_cast<elem_type>(out);
+ }
+
+ const Ptr2D src;
+ float scale_x, scale_y;
+ // NOTE(review): 'haight' looks like a typo for 'height'; neither
+ // member is initialized or read in this header (the code above uses
+ // src.width/src.height) — confirm before renaming a public member.
+ int width, haight;
+ };
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_FILTERS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp
new file mode 100644
index 00000000..2ed79802
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/funcattrib.hpp
@@ -0,0 +1,71 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_
+#define __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_
+
+#include <cstdio>
+
+namespace cv { namespace gpu { namespace device
+{
+ // Diagnostic helper: prints the CUDA launch attributes of kernel
+ // `func` to stdout. The cudaFuncGetAttributes error code is ignored;
+ // on failure the printed values are whatever is left in `attrs`.
+ template<class Func>
+ void printFuncAttrib(Func& func)
+ {
+
+ cudaFuncAttributes attrs;
+ cudaFuncGetAttributes(&attrs, func);
+
+ printf("=== Function stats ===\n");
+ printf("Name: \n");
+ // sharedSizeBytes/constSizeBytes/localSizeBytes are size_t, so
+ // they must be printed with %zu — "%d" is a format/argument
+ // mismatch (undefined behavior, wrong output on LP64 platforms).
+ printf("sharedSizeBytes = %zu\n", attrs.sharedSizeBytes);
+ printf("constSizeBytes = %zu\n", attrs.constSizeBytes);
+ printf("localSizeBytes = %zu\n", attrs.localSizeBytes);
+ printf("maxThreadsPerBlock = %d\n", attrs.maxThreadsPerBlock);
+ printf("numRegs = %d\n", attrs.numRegs);
+ printf("ptxVersion = %d\n", attrs.ptxVersion);
+ printf("binaryVersion = %d\n", attrs.binaryVersion);
+ printf("\n");
+ fflush(stdout);
+ }
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif /* __OPENCV_GPU_DEVICE_FUNCATTRIB_HPP_ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp
new file mode 100644
index 00000000..db264735
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/functional.hpp
@@ -0,0 +1,789 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_FUNCTIONAL_HPP__
+#define __OPENCV_GPU_FUNCTIONAL_HPP__
+
+#include <functional>
+#include "saturate_cast.hpp"
+#include "vec_traits.hpp"
+#include "type_traits.hpp"
+#include "device_functions.h"
+
+namespace cv { namespace gpu { namespace device
+{
+ // Function Objects
+ // Thin wrappers over the std adaptor bases so functors carry
+ // argument_type/result_type typedefs usable in device code.
+ // NOTE(review): std::unary_function/std::binary_function were
+ // deprecated in C++11 and removed in C++17 — fine for the toolchains
+ // this header targets, but verify before raising the host standard.
+ template<typename Argument, typename Result> struct unary_function : public std::unary_function<Argument, Result> {};
+ template<typename Argument1, typename Argument2, typename Result> struct binary_function : public std::binary_function<Argument1, Argument2, Result> {};
+
+ // Arithmetic Operations
+ // (arguments are taken as TypeTraits<T>::ParameterType to let the
+ // trait pick the cheapest passing convention for T)
+ template <typename T> struct plus : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a + b;
+ }
+ __host__ __device__ __forceinline__ plus() {}
+ __host__ __device__ __forceinline__ plus(const plus&) {}
+ };
+
+ template <typename T> struct minus : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a - b;
+ }
+ __host__ __device__ __forceinline__ minus() {}
+ __host__ __device__ __forceinline__ minus(const minus&) {}
+ };
+
+ template <typename T> struct multiplies : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a * b;
+ }
+ __host__ __device__ __forceinline__ multiplies() {}
+ __host__ __device__ __forceinline__ multiplies(const multiplies&) {}
+ };
+
+ template <typename T> struct divides : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a / b;
+ }
+ __host__ __device__ __forceinline__ divides() {}
+ __host__ __device__ __forceinline__ divides(const divides&) {}
+ };
+
+ template <typename T> struct modulus : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a % b;
+ }
+ __host__ __device__ __forceinline__ modulus() {}
+ __host__ __device__ __forceinline__ modulus(const modulus&) {}
+ };
+
+ template <typename T> struct negate : unary_function<T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a) const
+ {
+ return -a;
+ }
+ __host__ __device__ __forceinline__ negate() {}
+ __host__ __device__ __forceinline__ negate(const negate&) {}
+ };
+
+ // Comparison Operations
+ template <typename T> struct equal_to : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a == b;
+ }
+ __host__ __device__ __forceinline__ equal_to() {}
+ __host__ __device__ __forceinline__ equal_to(const equal_to&) {}
+ };
+
+ template <typename T> struct not_equal_to : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a != b;
+ }
+ __host__ __device__ __forceinline__ not_equal_to() {}
+ __host__ __device__ __forceinline__ not_equal_to(const not_equal_to&) {}
+ };
+
+ template <typename T> struct greater : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a > b;
+ }
+ __host__ __device__ __forceinline__ greater() {}
+ __host__ __device__ __forceinline__ greater(const greater&) {}
+ };
+
+ template <typename T> struct less : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a < b;
+ }
+ __host__ __device__ __forceinline__ less() {}
+ __host__ __device__ __forceinline__ less(const less&) {}
+ };
+
+ template <typename T> struct greater_equal : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a >= b;
+ }
+ __host__ __device__ __forceinline__ greater_equal() {}
+ __host__ __device__ __forceinline__ greater_equal(const greater_equal&) {}
+ };
+
+ template <typename T> struct less_equal : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a <= b;
+ }
+ __host__ __device__ __forceinline__ less_equal() {}
+ __host__ __device__ __forceinline__ less_equal(const less_equal&) {}
+ };
+
+ // Logical Operations
+ template <typename T> struct logical_and : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a && b;
+ }
+ __host__ __device__ __forceinline__ logical_and() {}
+ __host__ __device__ __forceinline__ logical_and(const logical_and&) {}
+ };
+
+ template <typename T> struct logical_or : binary_function<T, T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a || b;
+ }
+ __host__ __device__ __forceinline__ logical_or() {}
+ __host__ __device__ __forceinline__ logical_or(const logical_or&) {}
+ };
+
+ template <typename T> struct logical_not : unary_function<T, bool>
+ {
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a) const
+ {
+ return !a;
+ }
+ __host__ __device__ __forceinline__ logical_not() {}
+ __host__ __device__ __forceinline__ logical_not(const logical_not&) {}
+ };
+
+ // Bitwise Operations
+ template <typename T> struct bit_and : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a & b;
+ }
+ __host__ __device__ __forceinline__ bit_and() {}
+ __host__ __device__ __forceinline__ bit_and(const bit_and&) {}
+ };
+
+ template <typename T> struct bit_or : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a | b;
+ }
+ __host__ __device__ __forceinline__ bit_or() {}
+ __host__ __device__ __forceinline__ bit_or(const bit_or&) {}
+ };
+
+ template <typename T> struct bit_xor : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,
+ typename TypeTraits<T>::ParameterType b) const
+ {
+ return a ^ b;
+ }
+ __host__ __device__ __forceinline__ bit_xor() {}
+ __host__ __device__ __forceinline__ bit_xor(const bit_xor&) {}
+ };
+
+ template <typename T> struct bit_not : unary_function<T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType v) const
+ {
+ return ~v;
+ }
+ __host__ __device__ __forceinline__ bit_not() {}
+ __host__ __device__ __forceinline__ bit_not(const bit_not&) {}
+ };
+
+ // Generalized Identity Operations
+ // identity returns its argument; project1st/project2nd select the
+ // first/second argument of a binary call, discarding the other.
+ template <typename T> struct identity : unary_function<T, T>
+ {
+ __device__ __forceinline__ typename TypeTraits<T>::ParameterType operator()(typename TypeTraits<T>::ParameterType x) const
+ {
+ return x;
+ }
+ __host__ __device__ __forceinline__ identity() {}
+ __host__ __device__ __forceinline__ identity(const identity&) {}
+ };
+
+ template <typename T1, typename T2> struct project1st : binary_function<T1, T2, T1>
+ {
+ __device__ __forceinline__ typename TypeTraits<T1>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const
+ {
+ return lhs;
+ }
+ __host__ __device__ __forceinline__ project1st() {}
+ __host__ __device__ __forceinline__ project1st(const project1st&) {}
+ };
+
+ template <typename T1, typename T2> struct project2nd : binary_function<T1, T2, T2>
+ {
+ __device__ __forceinline__ typename TypeTraits<T2>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const
+ {
+ return rhs;
+ }
+ __host__ __device__ __forceinline__ project2nd() {}
+ __host__ __device__ __forceinline__ project2nd(const project2nd&) {}
+ };
+
+ // Min/Max Operations
+
+ // Generates a full specialization of `name` for `type` that forwards
+ // to the given builtin (::min/::max, or ::fmin/::fmax for floats).
+#define OPENCV_GPU_IMPLEMENT_MINMAX(name, type, op) \
+ template <> struct name<type> : binary_function<type, type, type> \
+ { \
+ __device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \
+ __host__ __device__ __forceinline__ name() {}\
+ __host__ __device__ __forceinline__ name(const name&) {}\
+ };
+
+ template <typename T> struct maximum : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
+ {
+ return max(lhs, rhs);
+ }
+ __host__ __device__ __forceinline__ maximum() {}
+ __host__ __device__ __forceinline__ maximum(const maximum&) {}
+ };
+
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uchar, ::max)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, schar, ::max)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, char, ::max)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, ushort, ::max)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, short, ::max)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, int, ::max)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uint, ::max)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, float, ::fmax)
+ OPENCV_GPU_IMPLEMENT_MINMAX(maximum, double, ::fmax)
+
+ template <typename T> struct minimum : binary_function<T, T, T>
+ {
+ __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType lhs, typename TypeTraits<T>::ParameterType rhs) const
+ {
+ return min(lhs, rhs);
+ }
+ __host__ __device__ __forceinline__ minimum() {}
+ __host__ __device__ __forceinline__ minimum(const minimum&) {}
+ };
+
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uchar, ::min)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, schar, ::min)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, char, ::min)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, ushort, ::min)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, short, ::min)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, int, ::min)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uint, ::min)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, float, ::fmin)
+ OPENCV_GPU_IMPLEMENT_MINMAX(minimum, double, ::fmin)
+
+#undef OPENCV_GPU_IMPLEMENT_MINMAX
+
+ // Math functions
+
+ // abs_func: generic template calls unqualified abs(x); specializations
+ // below make unsigned types the identity, route small signed integer
+ // types through ::abs(int), and use fabsf/fabs for float/double.
+ template <typename T> struct abs_func : unary_function<T, T>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType x) const
+ {
+ return abs(x);
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<unsigned char> : unary_function<unsigned char, unsigned char>
+ {
+ __device__ __forceinline__ unsigned char operator ()(unsigned char x) const
+ {
+ return x;
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<signed char> : unary_function<signed char, signed char>
+ {
+ __device__ __forceinline__ signed char operator ()(signed char x) const
+ {
+ return ::abs((int)x);
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<char> : unary_function<char, char>
+ {
+ __device__ __forceinline__ char operator ()(char x) const
+ {
+ return ::abs((int)x);
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<unsigned short> : unary_function<unsigned short, unsigned short>
+ {
+ __device__ __forceinline__ unsigned short operator ()(unsigned short x) const
+ {
+ return x;
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<short> : unary_function<short, short>
+ {
+ __device__ __forceinline__ short operator ()(short x) const
+ {
+ return ::abs((int)x);
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<unsigned int> : unary_function<unsigned int, unsigned int>
+ {
+ __device__ __forceinline__ unsigned int operator ()(unsigned int x) const
+ {
+ return x;
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<int> : unary_function<int, int>
+ {
+ __device__ __forceinline__ int operator ()(int x) const
+ {
+ return ::abs(x);
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<float> : unary_function<float, float>
+ {
+ __device__ __forceinline__ float operator ()(float x) const
+ {
+ return ::fabsf(x);
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+ template <> struct abs_func<double> : unary_function<double, double>
+ {
+ __device__ __forceinline__ double operator ()(double x) const
+ {
+ return ::fabs(x);
+ }
+
+ __host__ __device__ __forceinline__ abs_func() {}
+ __host__ __device__ __forceinline__ abs_func(const abs_func&) {}
+ };
+
+#define OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(name, func) \
+ template <typename T> struct name ## _func : unary_function<T, float> \
+ { \
+ __device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v) const \
+ { \
+ return func ## f(v); \
+ } \
+ __host__ __device__ __forceinline__ name ## _func() {} \
+ __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
+ }; \
+ template <> struct name ## _func<double> : unary_function<double, double> \
+ { \
+ __device__ __forceinline__ double operator ()(double v) const \
+ { \
+ return func(v); \
+ } \
+ __host__ __device__ __forceinline__ name ## _func() {} \
+ __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
+ };
+
+#define OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(name, func) \
+ template <typename T> struct name ## _func : binary_function<T, T, float> \
+ { \
+ __device__ __forceinline__ float operator ()(typename TypeTraits<T>::ParameterType v1, typename TypeTraits<T>::ParameterType v2) const \
+ { \
+ return func ## f(v1, v2); \
+ } \
+ __host__ __device__ __forceinline__ name ## _func() {} \
+ __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
+ }; \
+ template <> struct name ## _func<double> : binary_function<double, double, double> \
+ { \
+ __device__ __forceinline__ double operator ()(double v1, double v2) const \
+ { \
+ return func(v1, v2); \
+ } \
+ __host__ __device__ __forceinline__ name ## _func() {} \
+ __host__ __device__ __forceinline__ name ## _func(const name ## _func&) {} \
+ };
+
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sqrt, ::sqrt)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp, ::exp)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp2, ::exp2)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(exp10, ::exp10)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log, ::log)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log2, ::log2)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(log10, ::log10)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sin, ::sin)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cos, ::cos)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tan, ::tan)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asin, ::asin)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acos, ::acos)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atan, ::atan)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(sinh, ::sinh)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(cosh, ::cosh)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(tanh, ::tanh)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(asinh, ::asinh)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(acosh, ::acosh)
+ OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(atanh, ::atanh)
+
+ OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(hypot, ::hypot)
+ OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(atan2, ::atan2)
+ OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR(pow, ::pow)
+
+ #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR
+ #undef OPENCV_GPU_IMPLEMENT_UN_FUNCTOR_NO_DOUBLE
+ #undef OPENCV_GPU_IMPLEMENT_BIN_FUNCTOR
+
+ template<typename T> struct hypot_sqr_func : binary_function<T, T, float>
+ {
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType src1, typename TypeTraits<T>::ParameterType src2) const
+ {
+ return src1 * src1 + src2 * src2;
+ }
+ __host__ __device__ __forceinline__ hypot_sqr_func() {}
+ __host__ __device__ __forceinline__ hypot_sqr_func(const hypot_sqr_func&) {}
+ };
+
+ // Saturate Cast Functor
+ template <typename T, typename D> struct saturate_cast_func : unary_function<T, D>
+ {
+ __device__ __forceinline__ D operator ()(typename TypeTraits<T>::ParameterType v) const
+ {
+ return saturate_cast<D>(v);
+ }
+ __host__ __device__ __forceinline__ saturate_cast_func() {}
+ __host__ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func&) {}
+ };
+
+ // Threshold Functors
+ template <typename T> struct thresh_binary_func : unary_function<T, T>
+ {
+ __host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
+
+ __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
+ {
+ return (src > thresh) * maxVal;
+ }
+
+ __host__ __device__ __forceinline__ thresh_binary_func() {}
+ __host__ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)
+ : thresh(other.thresh), maxVal(other.maxVal) {}
+
+ const T thresh;
+ const T maxVal;
+ };
+
+ template <typename T> struct thresh_binary_inv_func : unary_function<T, T>
+ {
+ __host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
+
+ __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
+ {
+ return (src <= thresh) * maxVal;
+ }
+
+ __host__ __device__ __forceinline__ thresh_binary_inv_func() {}
+ __host__ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)
+ : thresh(other.thresh), maxVal(other.maxVal) {}
+
+ const T thresh;
+ const T maxVal;
+ };
+
+ template <typename T> struct thresh_trunc_func : unary_function<T, T>
+ {
+ explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}
+
+ __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
+ {
+ return minimum<T>()(src, thresh);
+ }
+
+ __host__ __device__ __forceinline__ thresh_trunc_func() {}
+ __host__ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)
+ : thresh(other.thresh) {}
+
+ const T thresh;
+ };
+
+ template <typename T> struct thresh_to_zero_func : unary_function<T, T>
+ {
+ explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}
+
+ __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
+ {
+ return (src > thresh) * src;
+ }
+
+ __host__ __device__ __forceinline__ thresh_to_zero_func() {}
+ __host__ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)
+ : thresh(other.thresh) {}
+
+ const T thresh;
+ };
+
+ template <typename T> struct thresh_to_zero_inv_func : unary_function<T, T>
+ {
+ explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}
+
+ __device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const
+ {
+ return (src <= thresh) * src;
+ }
+
+ __host__ __device__ __forceinline__ thresh_to_zero_inv_func() {}
+ __host__ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)
+ : thresh(other.thresh) {}
+
+ const T thresh;
+ };
+
+ // Function Object Adaptors
+ template <typename Predicate> struct unary_negate : unary_function<typename Predicate::argument_type, bool>
+ {
+ explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {}
+
+ __device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::argument_type>::ParameterType x) const
+ {
+ return !pred(x);
+ }
+
+ __host__ __device__ __forceinline__ unary_negate() {}
+ __host__ __device__ __forceinline__ unary_negate(const unary_negate& other) : pred(other.pred) {}
+
+ const Predicate pred;
+ };
+
+ template <typename Predicate> __host__ __device__ __forceinline__ unary_negate<Predicate> not1(const Predicate& pred)
+ {
+ return unary_negate<Predicate>(pred);
+ }
+
+ template <typename Predicate> struct binary_negate : binary_function<typename Predicate::first_argument_type, typename Predicate::second_argument_type, bool>
+ {
+ explicit __host__ __device__ __forceinline__ binary_negate(const Predicate& p) : pred(p) {}
+
+ __device__ __forceinline__ bool operator()(typename TypeTraits<typename Predicate::first_argument_type>::ParameterType x,
+ typename TypeTraits<typename Predicate::second_argument_type>::ParameterType y) const
+ {
+ return !pred(x,y);
+ }
+
+ __host__ __device__ __forceinline__ binary_negate() {}
+ __host__ __device__ __forceinline__ binary_negate(const binary_negate& other) : pred(other.pred) {}
+
+ const Predicate pred;
+ };
+
+ template <typename BinaryPredicate> __host__ __device__ __forceinline__ binary_negate<BinaryPredicate> not2(const BinaryPredicate& pred)
+ {
+ return binary_negate<BinaryPredicate>(pred);
+ }
+
+ template <typename Op> struct binder1st : unary_function<typename Op::second_argument_type, typename Op::result_type>
+ {
+ __host__ __device__ __forceinline__ binder1st(const Op& op_, const typename Op::first_argument_type& arg1_) : op(op_), arg1(arg1_) {}
+
+ __device__ __forceinline__ typename Op::result_type operator ()(typename TypeTraits<typename Op::second_argument_type>::ParameterType a) const
+ {
+ return op(arg1, a);
+ }
+
+ __host__ __device__ __forceinline__ binder1st() {}
+ __host__ __device__ __forceinline__ binder1st(const binder1st& other) : op(other.op), arg1(other.arg1) {}
+
+ const Op op;
+ const typename Op::first_argument_type arg1;
+ };
+
+ template <typename Op, typename T> __host__ __device__ __forceinline__ binder1st<Op> bind1st(const Op& op, const T& x)
+ {
+ return binder1st<Op>(op, typename Op::first_argument_type(x));
+ }
+
+ template <typename Op> struct binder2nd : unary_function<typename Op::first_argument_type, typename Op::result_type>
+ {
+ __host__ __device__ __forceinline__ binder2nd(const Op& op_, const typename Op::second_argument_type& arg2_) : op(op_), arg2(arg2_) {}
+
+ __forceinline__ __device__ typename Op::result_type operator ()(typename TypeTraits<typename Op::first_argument_type>::ParameterType a) const
+ {
+ return op(a, arg2);
+ }
+
+ __host__ __device__ __forceinline__ binder2nd() {}
+ __host__ __device__ __forceinline__ binder2nd(const binder2nd& other) : op(other.op), arg2(other.arg2) {}
+
+ const Op op;
+ const typename Op::second_argument_type arg2;
+ };
+
+ template <typename Op, typename T> __host__ __device__ __forceinline__ binder2nd<Op> bind2nd(const Op& op, const T& x)
+ {
+ return binder2nd<Op>(op, typename Op::second_argument_type(x));
+ }
+
+ // Functor Traits
+ template <typename F> struct IsUnaryFunction
+ {
+ typedef char Yes;
+ struct No {Yes a[2];};
+
+ template <typename T, typename D> static Yes check(unary_function<T, D>);
+ static No check(...);
+
+ static F makeF();
+
+ enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };
+ };
+
+ template <typename F> struct IsBinaryFunction
+ {
+ typedef char Yes;
+ struct No {Yes a[2];};
+
+ template <typename T1, typename T2, typename D> static Yes check(binary_function<T1, T2, D>);
+ static No check(...);
+
+ static F makeF();
+
+ enum { value = (sizeof(check(makeF())) == sizeof(Yes)) };
+ };
+
+ namespace functional_detail
+ {
+ template <size_t src_elem_size, size_t dst_elem_size> struct UnOpShift { enum { shift = 1 }; };
+ template <size_t src_elem_size> struct UnOpShift<src_elem_size, 1> { enum { shift = 4 }; };
+ template <size_t src_elem_size> struct UnOpShift<src_elem_size, 2> { enum { shift = 2 }; };
+
+ template <typename T, typename D> struct DefaultUnaryShift
+ {
+ enum { shift = UnOpShift<sizeof(T), sizeof(D)>::shift };
+ };
+
+ template <size_t src_elem_size1, size_t src_elem_size2, size_t dst_elem_size> struct BinOpShift { enum { shift = 1 }; };
+ template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 1> { enum { shift = 4 }; };
+ template <size_t src_elem_size1, size_t src_elem_size2> struct BinOpShift<src_elem_size1, src_elem_size2, 2> { enum { shift = 2 }; };
+
+ template <typename T1, typename T2, typename D> struct DefaultBinaryShift
+ {
+ enum { shift = BinOpShift<sizeof(T1), sizeof(T2), sizeof(D)>::shift };
+ };
+
+ template <typename Func, bool unary = IsUnaryFunction<Func>::value> struct ShiftDispatcher;
+ template <typename Func> struct ShiftDispatcher<Func, true>
+ {
+ enum { shift = DefaultUnaryShift<typename Func::argument_type, typename Func::result_type>::shift };
+ };
+ template <typename Func> struct ShiftDispatcher<Func, false>
+ {
+ enum { shift = DefaultBinaryShift<typename Func::first_argument_type, typename Func::second_argument_type, typename Func::result_type>::shift };
+ };
+ }
+
+ template <typename Func> struct DefaultTransformShift
+ {
+ enum { shift = functional_detail::ShiftDispatcher<Func>::shift };
+ };
+
+ template <typename Func> struct DefaultTransformFunctorTraits
+ {
+ enum { simple_block_dim_x = 16 };
+ enum { simple_block_dim_y = 16 };
+
+ enum { smart_block_dim_x = 16 };
+ enum { smart_block_dim_y = 16 };
+ enum { smart_shift = DefaultTransformShift<Func>::shift };
+ };
+
+ template <typename Func> struct TransformFunctorTraits : DefaultTransformFunctorTraits<Func> {};
+
+#define OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(type) \
+ template <> struct TransformFunctorTraits< type > : DefaultTransformFunctorTraits< type >
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_FUNCTIONAL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp
new file mode 100644
index 00000000..59597800
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/limits.hpp
@@ -0,0 +1,122 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_LIMITS_GPU_HPP__
+#define __OPENCV_GPU_LIMITS_GPU_HPP__
+
+#include <limits.h>
+#include <float.h>
+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+
+template <class T> struct numeric_limits;
+
+template <> struct numeric_limits<bool>
+{
+ __device__ __forceinline__ static bool min() { return false; }
+ __device__ __forceinline__ static bool max() { return true; }
+ static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<signed char>
+{
+ __device__ __forceinline__ static signed char min() { return SCHAR_MIN; }
+ __device__ __forceinline__ static signed char max() { return SCHAR_MAX; }
+ static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<unsigned char>
+{
+ __device__ __forceinline__ static unsigned char min() { return 0; }
+ __device__ __forceinline__ static unsigned char max() { return UCHAR_MAX; }
+ static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<short>
+{
+ __device__ __forceinline__ static short min() { return SHRT_MIN; }
+ __device__ __forceinline__ static short max() { return SHRT_MAX; }
+ static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<unsigned short>
+{
+ __device__ __forceinline__ static unsigned short min() { return 0; }
+ __device__ __forceinline__ static unsigned short max() { return USHRT_MAX; }
+ static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<int>
+{
+ __device__ __forceinline__ static int min() { return INT_MIN; }
+ __device__ __forceinline__ static int max() { return INT_MAX; }
+ static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<unsigned int>
+{
+ __device__ __forceinline__ static unsigned int min() { return 0; }
+ __device__ __forceinline__ static unsigned int max() { return UINT_MAX; }
+ static const bool is_signed = false;
+};
+
+template <> struct numeric_limits<float>
+{
+ __device__ __forceinline__ static float min() { return FLT_MIN; }
+ __device__ __forceinline__ static float max() { return FLT_MAX; }
+ __device__ __forceinline__ static float epsilon() { return FLT_EPSILON; }
+ static const bool is_signed = true;
+};
+
+template <> struct numeric_limits<double>
+{
+ __device__ __forceinline__ static double min() { return DBL_MIN; }
+ __device__ __forceinline__ static double max() { return DBL_MAX; }
+ __device__ __forceinline__ static double epsilon() { return DBL_EPSILON; }
+ static const bool is_signed = true;
+};
+
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_LIMITS_GPU_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp
new file mode 100644
index 00000000..2161b064
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/reduce.hpp
@@ -0,0 +1,197 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_REDUCE_HPP__
+#define __OPENCV_GPU_REDUCE_HPP__
+
+#include <thrust/tuple.h>
+#include "detail/reduce.hpp"
+#include "detail/reduce_key_val.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ template <int N, typename T, class Op>
+ __device__ __forceinline__ void reduce(volatile T* smem, T& val, unsigned int tid, const Op& op)
+ {
+ reduce_detail::Dispatcher<N>::reductor::template reduce<volatile T*, T&, const Op&>(smem, val, tid, op);
+ }
+ template <int N,
+ typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9,
+ typename R0, typename R1, typename R2, typename R3, typename R4, typename R5, typename R6, typename R7, typename R8, typename R9,
+ class Op0, class Op1, class Op2, class Op3, class Op4, class Op5, class Op6, class Op7, class Op8, class Op9>
+ __device__ __forceinline__ void reduce(const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>& smem,
+ const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>& val,
+ unsigned int tid,
+ const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>& op)
+ {
+ reduce_detail::Dispatcher<N>::reductor::template reduce<
+ const thrust::tuple<P0, P1, P2, P3, P4, P5, P6, P7, P8, P9>&,
+ const thrust::tuple<R0, R1, R2, R3, R4, R5, R6, R7, R8, R9>&,
+ const thrust::tuple<Op0, Op1, Op2, Op3, Op4, Op5, Op6, Op7, Op8, Op9>&>(smem, val, tid, op);
+ }
+
+ template <unsigned int N, typename K, typename V, class Cmp>
+ __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key, volatile V* svals, V& val, unsigned int tid, const Cmp& cmp)
+ {
+ reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&, volatile V*, V&, const Cmp&>(skeys, key, svals, val, tid, cmp);
+ }
+ template <unsigned int N,
+ typename K,
+ typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
+ class Cmp>
+ __device__ __forceinline__ void reduceKeyVal(volatile K* skeys, K& key,
+ const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ unsigned int tid, const Cmp& cmp)
+ {
+ reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<volatile K*, K&,
+ const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
+ const Cmp&>(skeys, key, svals, val, tid, cmp);
+ }
+ template <unsigned int N,
+ typename KP0, typename KP1, typename KP2, typename KP3, typename KP4, typename KP5, typename KP6, typename KP7, typename KP8, typename KP9,
+ typename KR0, typename KR1, typename KR2, typename KR3, typename KR4, typename KR5, typename KR6, typename KR7, typename KR8, typename KR9,
+ typename VP0, typename VP1, typename VP2, typename VP3, typename VP4, typename VP5, typename VP6, typename VP7, typename VP8, typename VP9,
+ typename VR0, typename VR1, typename VR2, typename VR3, typename VR4, typename VR5, typename VR6, typename VR7, typename VR8, typename VR9,
+ class Cmp0, class Cmp1, class Cmp2, class Cmp3, class Cmp4, class Cmp5, class Cmp6, class Cmp7, class Cmp8, class Cmp9>
+ __device__ __forceinline__ void reduceKeyVal(const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>& skeys,
+ const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>& key,
+ const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>& svals,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>& val,
+ unsigned int tid,
+ const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>& cmp)
+ {
+ reduce_key_val_detail::Dispatcher<N>::reductor::template reduce<
+ const thrust::tuple<KP0, KP1, KP2, KP3, KP4, KP5, KP6, KP7, KP8, KP9>&,
+ const thrust::tuple<KR0, KR1, KR2, KR3, KR4, KR5, KR6, KR7, KR8, KR9>&,
+ const thrust::tuple<VP0, VP1, VP2, VP3, VP4, VP5, VP6, VP7, VP8, VP9>&,
+ const thrust::tuple<VR0, VR1, VR2, VR3, VR4, VR5, VR6, VR7, VR8, VR9>&,
+ const thrust::tuple<Cmp0, Cmp1, Cmp2, Cmp3, Cmp4, Cmp5, Cmp6, Cmp7, Cmp8, Cmp9>&
+ >(skeys, key, svals, val, tid, cmp);
+ }
+
+ // smem_tuple
+
+ template <typename T0>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*>
+ smem_tuple(T0* t0)
+ {
+ return thrust::make_tuple((volatile T0*) t0);
+ }
+
+ template <typename T0, typename T1>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*>
+ smem_tuple(T0* t0, T1* t1)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1);
+ }
+
+ template <typename T0, typename T1, typename T2>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*>
+ smem_tuple(T0* t0, T1* t1, T2* t2)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2);
+ }
+
+ template <typename T0, typename T1, typename T2, typename T3>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*>
+ smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3);
+ }
+
+ template <typename T0, typename T1, typename T2, typename T3, typename T4>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*>
+ smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4);
+ }
+
+ template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*>
+ smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5);
+ }
+
+ template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*>
+ smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6);
+ }
+
+ template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*>
+ smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7);
+ }
+
+ template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*>
+ smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8);
+ }
+
+ template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
+ __device__ __forceinline__
+ thrust::tuple<volatile T0*, volatile T1*, volatile T2*, volatile T3*, volatile T4*, volatile T5*, volatile T6*, volatile T7*, volatile T8*, volatile T9*>
+ smem_tuple(T0* t0, T1* t1, T2* t2, T3* t3, T4* t4, T5* t5, T6* t6, T7* t7, T8* t8, T9* t9)
+ {
+ return thrust::make_tuple((volatile T0*) t0, (volatile T1*) t1, (volatile T2*) t2, (volatile T3*) t3, (volatile T4*) t4, (volatile T5*) t5, (volatile T6*) t6, (volatile T7*) t7, (volatile T8*) t8, (volatile T9*) t9);
+ }
+}}}
+
+#endif // __OPENCV_GPU_REDUCE_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp
new file mode 100644
index 00000000..7a2799fa
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/saturate_cast.hpp
@@ -0,0 +1,284 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_SATURATE_CAST_HPP__
+#define __OPENCV_GPU_SATURATE_CAST_HPP__
+
+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uchar v) { return _Tp(v); }
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(schar v) { return _Tp(v); }
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(ushort v) { return _Tp(v); }
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(short v) { return _Tp(v); }
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(uint v) { return _Tp(v); }
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(int v) { return _Tp(v); }
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(float v) { return _Tp(v); }
+ template<typename _Tp> __device__ __forceinline__ _Tp saturate_cast(double v) { return _Tp(v); }
+
+ template<> __device__ __forceinline__ uchar saturate_cast<uchar>(schar v)
+ {
+ uint res = 0;
+ int vi = v;
+ asm("cvt.sat.u8.s8 %0, %1;" : "=r"(res) : "r"(vi));
+ return res;
+ }
+ template<> __device__ __forceinline__ uchar saturate_cast<uchar>(short v)
+ {
+ uint res = 0;
+ asm("cvt.sat.u8.s16 %0, %1;" : "=r"(res) : "h"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ uchar saturate_cast<uchar>(ushort v)
+ {
+ uint res = 0;
+ asm("cvt.sat.u8.u16 %0, %1;" : "=r"(res) : "h"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ uchar saturate_cast<uchar>(int v)
+ {
+ uint res = 0;
+ asm("cvt.sat.u8.s32 %0, %1;" : "=r"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ uchar saturate_cast<uchar>(uint v)
+ {
+ uint res = 0;
+ asm("cvt.sat.u8.u32 %0, %1;" : "=r"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ uchar saturate_cast<uchar>(float v)
+ {
+ uint res = 0;
+ asm("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(res) : "f"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ uchar saturate_cast<uchar>(double v)
+ {
+ #if __CUDA_ARCH__ >= 130
+ uint res = 0;
+ asm("cvt.rni.sat.u8.f64 %0, %1;" : "=r"(res) : "d"(v));
+ return res;
+ #else
+ return saturate_cast<uchar>((float)v);
+ #endif
+ }
+
+ template<> __device__ __forceinline__ schar saturate_cast<schar>(uchar v)
+ {
+ uint res = 0;
+ uint vi = v;
+ asm("cvt.sat.s8.u8 %0, %1;" : "=r"(res) : "r"(vi));
+ return res;
+ }
+ template<> __device__ __forceinline__ schar saturate_cast<schar>(short v)
+ {
+ uint res = 0;
+ asm("cvt.sat.s8.s16 %0, %1;" : "=r"(res) : "h"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ schar saturate_cast<schar>(ushort v)
+ {
+ uint res = 0;
+ asm("cvt.sat.s8.u16 %0, %1;" : "=r"(res) : "h"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ schar saturate_cast<schar>(int v)
+ {
+ uint res = 0;
+ asm("cvt.sat.s8.s32 %0, %1;" : "=r"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ schar saturate_cast<schar>(uint v)
+ {
+ uint res = 0;
+ asm("cvt.sat.s8.u32 %0, %1;" : "=r"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ schar saturate_cast<schar>(float v)
+ {
+ uint res = 0;
+ asm("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(res) : "f"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ schar saturate_cast<schar>(double v)
+ {
+ #if __CUDA_ARCH__ >= 130
+ uint res = 0;
+ asm("cvt.rni.sat.s8.f64 %0, %1;" : "=r"(res) : "d"(v));
+ return res;
+ #else
+ return saturate_cast<schar>((float)v);
+ #endif
+ }
+
+ template<> __device__ __forceinline__ ushort saturate_cast<ushort>(schar v)
+ {
+ ushort res = 0;
+ int vi = v;
+ asm("cvt.sat.u16.s8 %0, %1;" : "=h"(res) : "r"(vi));
+ return res;
+ }
+ template<> __device__ __forceinline__ ushort saturate_cast<ushort>(short v)
+ {
+ ushort res = 0;
+ asm("cvt.sat.u16.s16 %0, %1;" : "=h"(res) : "h"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ ushort saturate_cast<ushort>(int v)
+ {
+ ushort res = 0;
+ asm("cvt.sat.u16.s32 %0, %1;" : "=h"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ ushort saturate_cast<ushort>(uint v)
+ {
+ ushort res = 0;
+ asm("cvt.sat.u16.u32 %0, %1;" : "=h"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ ushort saturate_cast<ushort>(float v)
+ {
+ ushort res = 0;
+ asm("cvt.rni.sat.u16.f32 %0, %1;" : "=h"(res) : "f"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ ushort saturate_cast<ushort>(double v)
+ {
+ #if __CUDA_ARCH__ >= 130
+ ushort res = 0;
+ asm("cvt.rni.sat.u16.f64 %0, %1;" : "=h"(res) : "d"(v));
+ return res;
+ #else
+ return saturate_cast<ushort>((float)v);
+ #endif
+ }
+
+ template<> __device__ __forceinline__ short saturate_cast<short>(ushort v)
+ {
+ short res = 0;
+ asm("cvt.sat.s16.u16 %0, %1;" : "=h"(res) : "h"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ short saturate_cast<short>(int v)
+ {
+ short res = 0;
+ asm("cvt.sat.s16.s32 %0, %1;" : "=h"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ short saturate_cast<short>(uint v)
+ {
+ short res = 0;
+ asm("cvt.sat.s16.u32 %0, %1;" : "=h"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ short saturate_cast<short>(float v)
+ {
+ short res = 0;
+ asm("cvt.rni.sat.s16.f32 %0, %1;" : "=h"(res) : "f"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ short saturate_cast<short>(double v)
+ {
+ #if __CUDA_ARCH__ >= 130
+ short res = 0;
+ asm("cvt.rni.sat.s16.f64 %0, %1;" : "=h"(res) : "d"(v));
+ return res;
+ #else
+ return saturate_cast<short>((float)v);
+ #endif
+ }
+
+ template<> __device__ __forceinline__ int saturate_cast<int>(uint v)
+ {
+ int res = 0;
+ asm("cvt.sat.s32.u32 %0, %1;" : "=r"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ int saturate_cast<int>(float v)
+ {
+ return __float2int_rn(v);
+ }
+ template<> __device__ __forceinline__ int saturate_cast<int>(double v)
+ {
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
+ return __double2int_rn(v);
+ #else
+ return saturate_cast<int>((float)v);
+ #endif
+ }
+
+ template<> __device__ __forceinline__ uint saturate_cast<uint>(schar v)
+ {
+ uint res = 0;
+ int vi = v;
+ asm("cvt.sat.u32.s8 %0, %1;" : "=r"(res) : "r"(vi));
+ return res;
+ }
+ template<> __device__ __forceinline__ uint saturate_cast<uint>(short v)
+ {
+ uint res = 0;
+ asm("cvt.sat.u32.s16 %0, %1;" : "=r"(res) : "h"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ uint saturate_cast<uint>(int v)
+ {
+ uint res = 0;
+ asm("cvt.sat.u32.s32 %0, %1;" : "=r"(res) : "r"(v));
+ return res;
+ }
+ template<> __device__ __forceinline__ uint saturate_cast<uint>(float v)
+ {
+ return __float2uint_rn(v);
+ }
+ template<> __device__ __forceinline__ uint saturate_cast<uint>(double v)
+ {
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130
+ return __double2uint_rn(v);
+ #else
+ return saturate_cast<uint>((float)v);
+ #endif
+ }
+}}}
+
+#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp
new file mode 100644
index 00000000..3d8da16f
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/scan.hpp
@@ -0,0 +1,250 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_SCAN_HPP__
+#define __OPENCV_GPU_SCAN_HPP__
+
+#include "opencv2/gpu/device/common.hpp"
+#include "opencv2/gpu/device/utility.hpp"
+#include "opencv2/gpu/device/warp.hpp"
+#include "opencv2/gpu/device/warp_shuffle.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };
+
+ template <ScanKind Kind, typename T, typename F> struct WarpScan
+ {
+ __device__ __forceinline__ WarpScan() {}
+ __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }
+
+ __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
+ {
+ const unsigned int lane = idx & 31;
+ F op;
+
+ if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
+ if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
+ if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
+ if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
+ if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
+
+ if( Kind == INCLUSIVE )
+ return ptr [idx];
+ else
+ return (lane > 0) ? ptr [idx - 1] : 0;
+ }
+
+ __device__ __forceinline__ unsigned int index(const unsigned int tid)
+ {
+ return tid;
+ }
+
+ __device__ __forceinline__ void init(volatile T *ptr){}
+
+ static const int warp_offset = 0;
+
+ typedef WarpScan<INCLUSIVE, T, F> merge;
+ };
+
+ template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
+ {
+ __device__ __forceinline__ WarpScanNoComp() {}
+ __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }
+
+ __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
+ {
+ const unsigned int lane = threadIdx.x & 31;
+ F op;
+
+ ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
+
+ if( Kind == INCLUSIVE )
+ return ptr [idx];
+ else
+ return (lane > 0) ? ptr [idx - 1] : 0;
+ }
+
+ __device__ __forceinline__ unsigned int index(const unsigned int tid)
+ {
+ return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
+ }
+
+ __device__ __forceinline__ void init(volatile T *ptr)
+ {
+ ptr[threadIdx.x] = 0;
+ }
+
+ static const int warp_smem_stride = 32 + 16 + 1;
+ static const int warp_offset = 16;
+ static const int warp_log = 5;
+ static const int warp_mask = 31;
+
+ typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
+ };
+
+ template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
+ {
+ __device__ __forceinline__ BlockScan() {}
+ __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }
+
+ __device__ __forceinline__ T operator()(volatile T *ptr)
+ {
+ const unsigned int tid = threadIdx.x;
+ const unsigned int lane = tid & warp_mask;
+ const unsigned int warp = tid >> warp_log;
+
+ Sc scan;
+ typename Sc::merge merge_scan;
+ const unsigned int idx = scan.index(tid);
+
+ T val = scan(ptr, idx);
+ __syncthreads ();
+
+ if( warp == 0)
+ scan.init(ptr);
+ __syncthreads ();
+
+ if( lane == 31 )
+ ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
+ __syncthreads ();
+
+ if( warp == 0 )
+ merge_scan(ptr, idx);
+ __syncthreads();
+
+ if ( warp > 0)
+ val = ptr [scan.warp_offset + warp - 1] + val;
+ __syncthreads ();
+
+ ptr[idx] = val;
+ __syncthreads ();
+
+ return val ;
+ }
+
+ static const int warp_log = 5;
+ static const int warp_mask = 31;
+ };
+
+ template <typename T>
+ __device__ T warpScanInclusive(T idata, volatile T* s_Data, unsigned int tid)
+ {
+ #if __CUDA_ARCH__ >= 300
+ const unsigned int laneId = cv::gpu::device::Warp::laneId();
+
+ // scan on shuffl functions
+ #pragma unroll
+ for (int i = 1; i <= (OPENCV_GPU_WARP_SIZE / 2); i *= 2)
+ {
+ const T n = cv::gpu::device::shfl_up(idata, i);
+ if (laneId >= i)
+ idata += n;
+ }
+
+ return idata;
+ #else
+ unsigned int pos = 2 * tid - (tid & (OPENCV_GPU_WARP_SIZE - 1));
+ s_Data[pos] = 0;
+ pos += OPENCV_GPU_WARP_SIZE;
+ s_Data[pos] = idata;
+
+ s_Data[pos] += s_Data[pos - 1];
+ s_Data[pos] += s_Data[pos - 2];
+ s_Data[pos] += s_Data[pos - 4];
+ s_Data[pos] += s_Data[pos - 8];
+ s_Data[pos] += s_Data[pos - 16];
+
+ return s_Data[pos];
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T warpScanExclusive(T idata, volatile T* s_Data, unsigned int tid)
+ {
+ return warpScanInclusive(idata, s_Data, tid) - idata;
+ }
+
+ template <int tiNumScanThreads, typename T>
+ __device__ T blockScanInclusive(T idata, volatile T* s_Data, unsigned int tid)
+ {
+ if (tiNumScanThreads > OPENCV_GPU_WARP_SIZE)
+ {
+ //Bottom-level inclusive warp scan
+ T warpResult = warpScanInclusive(idata, s_Data, tid);
+
+ //Save top elements of each warp for exclusive warp scan
+ //sync to wait for warp scans to complete (because s_Data is being overwritten)
+ __syncthreads();
+ if ((tid & (OPENCV_GPU_WARP_SIZE - 1)) == (OPENCV_GPU_WARP_SIZE - 1))
+ {
+ s_Data[tid >> OPENCV_GPU_LOG_WARP_SIZE] = warpResult;
+ }
+
+ //wait for warp scans to complete
+ __syncthreads();
+
+ if (tid < (tiNumScanThreads / OPENCV_GPU_WARP_SIZE) )
+ {
+ //grab top warp elements
+ T val = s_Data[tid];
+ //calculate exclusive scan and write back to shared memory
+ s_Data[tid] = warpScanExclusive(val, s_Data, tid);
+ }
+
+ //return updated warp scans with exclusive scan results
+ __syncthreads();
+
+ return warpResult + s_Data[tid >> OPENCV_GPU_LOG_WARP_SIZE];
+ }
+ else
+ {
+ return warpScanInclusive(idata, s_Data, tid);
+ }
+ }
+}}}
+
+#endif // __OPENCV_GPU_SCAN_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp
new file mode 100644
index 00000000..b0377e53
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/simd_functions.hpp
@@ -0,0 +1,909 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+/*
+ * Copyright (c) 2013 NVIDIA Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of NVIDIA Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OPENCV_GPU_SIMD_FUNCTIONS_HPP__
+#define __OPENCV_GPU_SIMD_FUNCTIONS_HPP__
+
+#include "common.hpp"
+
+/*
+ This header file contains inline functions that implement intra-word SIMD
+ operations, that are hardware accelerated on sm_3x (Kepler) GPUs. Efficient
+ emulation code paths are provided for earlier architectures (sm_1x, sm_2x)
+ to make the code portable across all GPUs supported by CUDA. The following
+ functions are currently implemented:
+
+ vadd2(a,b) per-halfword unsigned addition, with wrap-around: a + b
+ vsub2(a,b) per-halfword unsigned subtraction, with wrap-around: a - b
+ vabsdiff2(a,b) per-halfword unsigned absolute difference: |a - b|
+ vavg2(a,b) per-halfword unsigned average: (a + b) / 2
+ vavrg2(a,b) per-halfword unsigned rounded average: (a + b + 1) / 2
+ vseteq2(a,b) per-halfword unsigned comparison: a == b ? 1 : 0
+ vcmpeq2(a,b) per-halfword unsigned comparison: a == b ? 0xffff : 0
+ vsetge2(a,b) per-halfword unsigned comparison: a >= b ? 1 : 0
+ vcmpge2(a,b) per-halfword unsigned comparison: a >= b ? 0xffff : 0
+ vsetgt2(a,b) per-halfword unsigned comparison: a > b ? 1 : 0
+ vcmpgt2(a,b) per-halfword unsigned comparison: a > b ? 0xffff : 0
+ vsetle2(a,b) per-halfword unsigned comparison: a <= b ? 1 : 0
+ vcmple2(a,b) per-halfword unsigned comparison: a <= b ? 0xffff : 0
+ vsetlt2(a,b) per-halfword unsigned comparison: a < b ? 1 : 0
+ vcmplt2(a,b) per-halfword unsigned comparison: a < b ? 0xffff : 0
+ vsetne2(a,b) per-halfword unsigned comparison: a != b ? 1 : 0
+ vcmpne2(a,b) per-halfword unsigned comparison: a != b ? 0xffff : 0
+ vmax2(a,b) per-halfword unsigned maximum: max(a, b)
+ vmin2(a,b) per-halfword unsigned minimum: min(a, b)
+
+ vadd4(a,b) per-byte unsigned addition, with wrap-around: a + b
+ vsub4(a,b) per-byte unsigned subtraction, with wrap-around: a - b
+ vabsdiff4(a,b) per-byte unsigned absolute difference: |a - b|
+ vavg4(a,b) per-byte unsigned average: (a + b) / 2
+ vavrg4(a,b) per-byte unsigned rounded average: (a + b + 1) / 2
+ vseteq4(a,b) per-byte unsigned comparison: a == b ? 1 : 0
+ vcmpeq4(a,b) per-byte unsigned comparison: a == b ? 0xff : 0
+ vsetge4(a,b) per-byte unsigned comparison: a >= b ? 1 : 0
+ vcmpge4(a,b) per-byte unsigned comparison: a >= b ? 0xff : 0
+ vsetgt4(a,b) per-byte unsigned comparison: a > b ? 1 : 0
+ vcmpgt4(a,b) per-byte unsigned comparison: a > b ? 0xff : 0
+ vsetle4(a,b) per-byte unsigned comparison: a <= b ? 1 : 0
+ vcmple4(a,b) per-byte unsigned comparison: a <= b ? 0xff : 0
+ vsetlt4(a,b) per-byte unsigned comparison: a < b ? 1 : 0
+ vcmplt4(a,b) per-byte unsigned comparison: a < b ? 0xff : 0
+ vsetne4(a,b) per-byte unsigned comparison: a != b ? 1: 0
+ vcmpne4(a,b) per-byte unsigned comparison: a != b ? 0xff: 0
+ vmax4(a,b) per-byte unsigned maximum: max(a, b)
+ vmin4(a,b) per-byte unsigned minimum: min(a, b)
+*/
+
+namespace cv { namespace gpu { namespace device
+{
+ // 2
+
+ static __device__ __forceinline__ unsigned int vadd2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vadd2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vadd.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vadd.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s;
+ s = a ^ b; // sum bits
+ r = a + b; // actual sum
+ s = s ^ r; // determine carry-ins for each bit position
+ s = s & 0x00010000; // carry-in to high word (= carry-out from low word)
+ r = r - s; // subtract out carry-out from low word
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsub2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vsub2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vsub.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vsub.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s;
+ s = a ^ b; // sum bits
+ r = a - b; // actual sum
+ s = s ^ r; // determine carry-ins for each bit position
+ s = s & 0x00010000; // borrow to high word
+ r = r + s; // compensate for borrow from low word
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vabsdiff2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vabsdiff2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vabsdiff.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vabsdiff.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s, t, u, v;
+ s = a & 0x0000ffff; // extract low halfword
+ r = b & 0x0000ffff; // extract low halfword
+ u = ::max(r, s); // maximum of low halfwords
+ v = ::min(r, s); // minimum of low halfwords
+ s = a & 0xffff0000; // extract high halfword
+ r = b & 0xffff0000; // extract high halfword
+ t = ::max(r, s); // maximum of high halfwords
+ s = ::min(r, s); // minimum of high halfwords
+ r = u | t; // maximum of both halfwords
+ s = v | s; // minimum of both halfwords
+ r = r - s; // |a - b| = max(a,b) - min(a,b);
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vavg2(unsigned int a, unsigned int b)
+ {
+ unsigned int r, s;
+
+ // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==>
+ // (a + b) / 2 = (a & b) + ((a ^ b) >> 1)
+ s = a ^ b;
+ r = a & b;
+ s = s & 0xfffefffe; // ensure shift doesn't cross halfword boundaries
+ s = s >> 1;
+ s = r + s;
+
+ return s;
+ }
+
+ static __device__ __forceinline__ unsigned int vavrg2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vavrg2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==>
+ // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1)
+ unsigned int s;
+ s = a ^ b;
+ r = a | b;
+ s = s & 0xfffefffe; // ensure shift doesn't cross half-word boundaries
+ s = s >> 1;
+ r = r - s;
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vseteq2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset2.u32.u32.eq %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ unsigned int c;
+ r = a ^ b; // 0x0000 if a == b
+ c = r | 0x80008000; // set msbs, to catch carry out
+ r = r ^ c; // extract msbs, msb = 1 if r < 0x8000
+ c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
+ c = r & ~c; // msb = 1, if r was 0x0000
+ r = c >> 15; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpeq2(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vseteq2(a, b);
+ c = r << 16; // convert bool
+ r = c - r; // into mask
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ r = a ^ b; // 0x0000 if a == b
+ c = r | 0x80008000; // set msbs, to catch carry out
+ r = r ^ c; // extract msbs, msb = 1 if r < 0x8000
+ c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
+ c = r & ~c; // msb = 1, if r was 0x0000
+ r = c >> 15; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetge2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset2.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(b));
+ c = vavrg2(a, b); // (a + ~b + 1) / 2 = (a - b) / 2
+ c = c & 0x80008000; // msb = carry-outs
+ r = c >> 15; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpge2(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetge2(a, b);
+ c = r << 16; // convert bool
+ r = c - r; // into mask
+ #else
+ asm("not.b32 %0, %0;" : "+r"(b));
+ c = vavrg2(a, b); // (a + ~b + 1) / 2 = (a - b) / 2
+ c = c & 0x80008000; // msb = carry-outs
+ r = c >> 15; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetgt2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset2.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(b));
+ c = vavg2(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down]
+ c = c & 0x80008000; // msbs = carry-outs
+ r = c >> 15; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpgt2(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetgt2(a, b);
+ c = r << 16; // convert bool
+ r = c - r; // into mask
+ #else
+ asm("not.b32 %0, %0;" : "+r"(b));
+ c = vavg2(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down]
+ c = c & 0x80008000; // msbs = carry-outs
+ r = c >> 15; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetle2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset2.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavrg2(a, b); // (b + ~a + 1) / 2 = (b - a) / 2
+ c = c & 0x80008000; // msb = carry-outs
+ r = c >> 15; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmple2(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetle2(a, b);
+ c = r << 16; // convert bool
+ r = c - r; // into mask
+ #else
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavrg2(a, b); // (b + ~a + 1) / 2 = (b - a) / 2
+ c = c & 0x80008000; // msb = carry-outs
+ r = c >> 15; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetlt2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset2.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavg2(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down]
+ c = c & 0x80008000; // msb = carry-outs
+ r = c >> 15; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmplt2(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetlt2(a, b);
+ c = r << 16; // convert bool
+ r = c - r; // into mask
+ #else
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavg2(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down]
+ c = c & 0x80008000; // msb = carry-outs
+ r = c >> 15; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetne2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm ("vset2.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ unsigned int c;
+ r = a ^ b; // 0x0000 if a == b
+ c = r | 0x80008000; // set msbs, to catch carry out
+ c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
+ c = r | c; // msb = 1, if r was not 0x0000
+ c = c & 0x80008000; // extract msbs
+ r = c >> 15; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpne2(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetne2(a, b);
+ c = r << 16; // convert bool
+ r = c - r; // into mask
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ r = a ^ b; // 0x0000 if a == b
+ c = r | 0x80008000; // set msbs, to catch carry out
+ c = c - 0x00010001; // msb = 0, if r was 0x0000 or 0x8000
+ c = r | c; // msb = 1, if r was not 0x0000
+ c = c & 0x80008000; // extract msbs
+ r = c >> 15; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vmax2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vmax2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vmax.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmax.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s, t, u;
+ r = a & 0x0000ffff; // extract low halfword
+ s = b & 0x0000ffff; // extract low halfword
+ t = ::max(r, s); // maximum of low halfwords
+ r = a & 0xffff0000; // extract high halfword
+ s = b & 0xffff0000; // extract high halfword
+ u = ::max(r, s); // maximum of high halfwords
+ r = t | u; // combine halfword maximums
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vmin2(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vmin2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vmin.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmin.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s, t, u;
+ r = a & 0x0000ffff; // extract low halfword
+ s = b & 0x0000ffff; // extract low halfword
+ t = ::min(r, s); // minimum of low halfwords
+ r = a & 0xffff0000; // extract high halfword
+ s = b & 0xffff0000; // extract high halfword
+ u = ::min(r, s); // minimum of high halfwords
+ r = t | u; // combine halfword minimums
+ #endif
+
+ return r;
+ }
+
+ // 4
+
+ static __device__ __forceinline__ unsigned int vadd4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vadd4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vadd.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vadd.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vadd.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vadd.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s, t;
+ s = a ^ b; // sum bits
+ r = a & 0x7f7f7f7f; // clear msbs
+ t = b & 0x7f7f7f7f; // clear msbs
+ s = s & 0x80808080; // msb sum bits
+ r = r + t; // add without msbs, record carry-out in msbs
+ r = r ^ s; // sum of msb sum and carry-in bits, w/o carry-out
+ #endif /* __CUDA_ARCH__ >= 300 */
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsub4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vsub4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vsub.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vsub.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vsub.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vsub.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s, t;
+ s = a ^ ~b; // inverted sum bits
+ r = a | 0x80808080; // set msbs
+ t = b & 0x7f7f7f7f; // clear msbs
+ s = s & 0x80808080; // inverted msb sum bits
+ r = r - t; // subtract w/o msbs, record inverted borrows in msb
+ r = r ^ s; // combine inverted msb sum bits and borrows
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vavg4(unsigned int a, unsigned int b)
+ {
+ unsigned int r, s;
+
+ // HAKMEM #23: a + b = 2 * (a & b) + (a ^ b) ==>
+ // (a + b) / 2 = (a & b) + ((a ^ b) >> 1)
+ s = a ^ b;
+ r = a & b;
+ s = s & 0xfefefefe; // ensure following shift doesn't cross byte boundaries
+ s = s >> 1;
+ s = r + s;
+
+ return s;
+ }
+
+ static __device__ __forceinline__ unsigned int vavrg4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vavrg4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ // HAKMEM #23: a + b = 2 * (a | b) - (a ^ b) ==>
+ // (a + b + 1) / 2 = (a | b) - ((a ^ b) >> 1)
+ unsigned int c;
+ c = a ^ b;
+ r = a | b;
+ c = c & 0xfefefefe; // ensure following shift doesn't cross byte boundaries
+ c = c >> 1;
+ r = r - c;
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vseteq4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset4.u32.u32.eq %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ unsigned int c;
+ r = a ^ b; // 0x00 if a == b
+ c = r | 0x80808080; // set msbs, to catch carry out
+ r = r ^ c; // extract msbs, msb = 1 if r < 0x80
+ c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80
+ c = r & ~c; // msb = 1, if r was 0x00
+ r = c >> 7; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpeq4(unsigned int a, unsigned int b)
+ {
+ unsigned int r, t;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vseteq4(a, b);
+ t = r << 8; // convert bool
+ r = t - r; // to mask
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ t = a ^ b; // 0x00 if a == b
+ r = t | 0x80808080; // set msbs, to catch carry out
+ t = t ^ r; // extract msbs, msb = 1 if t < 0x80
+ r = r - 0x01010101; // msb = 0, if t was 0x00 or 0x80
+ r = t & ~r; // msb = 1, if t was 0x00
+ t = r >> 7; // build mask
+ t = r - t; // from
+ r = t | r; // msbs
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetle4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset4.u32.u32.le %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavrg4(a, b); // (b + ~a + 1) / 2 = (b - a) / 2
+ c = c & 0x80808080; // msb = carry-outs
+ r = c >> 7; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmple4(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetle4(a, b);
+ c = r << 8; // convert bool
+ r = c - r; // to mask
+ #else
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavrg4(a, b); // (b + ~a + 1) / 2 = (b - a) / 2
+ c = c & 0x80808080; // msbs = carry-outs
+ r = c >> 7; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetlt4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset4.u32.u32.lt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavg4(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down]
+ c = c & 0x80808080; // msb = carry-outs
+ r = c >> 7; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmplt4(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetlt4(a, b);
+ c = r << 8; // convert bool
+ r = c - r; // to mask
+ #else
+ asm("not.b32 %0, %0;" : "+r"(a));
+ c = vavg4(a, b); // (b + ~a) / 2 = (b - a) / 2 [rounded down]
+ c = c & 0x80808080; // msbs = carry-outs
+ r = c >> 7; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetge4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset4.u32.u32.ge %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(b));
+ c = vavrg4(a, b); // (a + ~b + 1) / 2 = (a - b) / 2
+ c = c & 0x80808080; // msb = carry-outs
+ r = c >> 7; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpge4(unsigned int a, unsigned int b)
+ {
+ unsigned int r, s;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetge4(a, b);
+ s = r << 8; // convert bool
+ r = s - r; // to mask
+ #else
+ asm ("not.b32 %0,%0;" : "+r"(b));
+ r = vavrg4 (a, b); // (a + ~b + 1) / 2 = (a - b) / 2
+ r = r & 0x80808080; // msb = carry-outs
+ s = r >> 7; // build mask
+ s = r - s; // from
+ r = s | r; // msbs
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetgt4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset4.u32.u32.gt %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int c;
+ asm("not.b32 %0, %0;" : "+r"(b));
+ c = vavg4(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down]
+ c = c & 0x80808080; // msb = carry-outs
+ r = c >> 7; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpgt4(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetgt4(a, b);
+ c = r << 8; // convert bool
+ r = c - r; // to mask
+ #else
+ asm("not.b32 %0, %0;" : "+r"(b));
+ c = vavg4(a, b); // (a + ~b) / 2 = (a - b) / 2 [rounded down]
+ c = c & 0x80808080; // msb = carry-outs
+ r = c >> 7; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vsetne4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vset4.u32.u32.ne %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ unsigned int c;
+ r = a ^ b; // 0x00 if a == b
+ c = r | 0x80808080; // set msbs, to catch carry out
+ c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80
+ c = r | c; // msb = 1, if r was not 0x00
+ c = c & 0x80808080; // extract msbs
+ r = c >> 7; // convert to bool
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vcmpne4(unsigned int a, unsigned int b)
+ {
+ unsigned int r, c;
+
+ #if __CUDA_ARCH__ >= 300
+ r = vsetne4(a, b);
+ c = r << 8; // convert bool
+ r = c - r; // to mask
+ #else
+ // inspired by Alan Mycroft's null-byte detection algorithm:
+ // null_byte(x) = ((x - 0x01010101) & (~x & 0x80808080))
+ r = a ^ b; // 0x00 if a == b
+ c = r | 0x80808080; // set msbs, to catch carry out
+ c = c - 0x01010101; // msb = 0, if r was 0x00 or 0x80
+ c = r | c; // msb = 1, if r was not 0x00
+ c = c & 0x80808080; // extract msbs
+ r = c >> 7; // convert
+ r = c - r; // msbs to
+ r = c | r; // mask
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vabsdiff4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vabsdiff4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vabsdiff.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vabsdiff.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vabsdiff.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vabsdiff.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s;
+ s = vcmpge4(a, b); // mask = 0xff if a >= b
+ r = a ^ b; //
+ s = (r & s) ^ b; // select a when a >= b, else select b => max(a,b)
+ r = s ^ r; // select a when b >= a, else select b => min(a,b)
+ r = s - r; // |a - b| = max(a,b) - min(a,b);
+ #endif
+
+ return r;
+ }
+
+ static __device__ __forceinline__ unsigned int vmax4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vmax4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vmax.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmax.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmax.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmax.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s;
+ s = vcmpge4(a, b); // mask = 0xff if a >= b
+ r = a & s; // select a when b >= a
+ s = b & ~s; // select b when b < a
+ r = r | s; // combine byte selections
+ #endif
+
+ return r; // byte-wise unsigned maximum
+ }
+
+ static __device__ __forceinline__ unsigned int vmin4(unsigned int a, unsigned int b)
+ {
+ unsigned int r = 0;
+
+ #if __CUDA_ARCH__ >= 300
+ asm("vmin4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #elif __CUDA_ARCH__ >= 200
+ asm("vmin.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmin.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmin.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ asm("vmin.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(r) : "r"(a), "r"(b), "r"(r));
+ #else
+ unsigned int s;
+ s = vcmpge4(b, a); // mask = 0xff if a >= b
+ r = a & s; // select a when b >= a
+ s = b & ~s; // select b when b < a
+ r = r | s; // combine byte selections
+ #endif
+
+ return r;
+ }
+}}}
+
+#endif // __OPENCV_GPU_SIMD_FUNCTIONS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp
new file mode 100644
index 00000000..e77691b7
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/static_check.hpp
@@ -0,0 +1,67 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__
+#define __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__
+
+#if defined(__CUDACC__)
+ #define __OPENCV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
+#else
+ #define __OPENCV_GPU_HOST_DEVICE__
+#endif
+
+namespace cv { namespace gpu
+{
+ namespace device
+ {
+ template<bool expr> struct Static {};
+
+ template<> struct Static<true>
+ {
+ __OPENCV_GPU_HOST_DEVICE__ static void check() {};
+ };
+ }
+}}
+
+#undef __OPENCV_GPU_HOST_DEVICE__
+
+#endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp
new file mode 100644
index 00000000..636caac6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/transform.hpp
@@ -0,0 +1,67 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_TRANSFORM_HPP__
+#define __OPENCV_GPU_TRANSFORM_HPP__
+
+#include "common.hpp"
+#include "utility.hpp"
+#include "detail/transform_detail.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ template <typename T, typename D, typename UnOp, typename Mask>
+ static inline void transform(PtrStepSz<T> src, PtrStepSz<D> dst, UnOp op, const Mask& mask, cudaStream_t stream)
+ {
+ typedef TransformFunctorTraits<UnOp> ft;
+ transform_detail::TransformDispatcher<VecTraits<T>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src, dst, op, mask, stream);
+ }
+
+ template <typename T1, typename T2, typename D, typename BinOp, typename Mask>
+ static inline void transform(PtrStepSz<T1> src1, PtrStepSz<T2> src2, PtrStepSz<D> dst, BinOp op, const Mask& mask, cudaStream_t stream)
+ {
+ typedef TransformFunctorTraits<BinOp> ft;
+ transform_detail::TransformDispatcher<VecTraits<T1>::cn == 1 && VecTraits<T2>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src1, src2, dst, op, mask, stream);
+ }
+}}}
+
+#endif // __OPENCV_GPU_TRANSFORM_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp
new file mode 100644
index 00000000..1b36acca
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/type_traits.hpp
@@ -0,0 +1,82 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_TYPE_TRAITS_HPP__
+#define __OPENCV_GPU_TYPE_TRAITS_HPP__
+
+#include "detail/type_traits_detail.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ template <typename T> struct IsSimpleParameter
+ {
+ enum {value = type_traits_detail::IsIntegral<T>::value || type_traits_detail::IsFloat<T>::value ||
+ type_traits_detail::PointerTraits<typename type_traits_detail::ReferenceTraits<T>::type>::value};
+ };
+
+ template <typename T> struct TypeTraits
+ {
+ typedef typename type_traits_detail::UnConst<T>::type NonConstType;
+ typedef typename type_traits_detail::UnVolatile<T>::type NonVolatileType;
+ typedef typename type_traits_detail::UnVolatile<typename type_traits_detail::UnConst<T>::type>::type UnqualifiedType;
+ typedef typename type_traits_detail::PointerTraits<UnqualifiedType>::type PointeeType;
+ typedef typename type_traits_detail::ReferenceTraits<T>::type ReferredType;
+
+ enum { isConst = type_traits_detail::UnConst<T>::value };
+ enum { isVolatile = type_traits_detail::UnVolatile<T>::value };
+
+ enum { isReference = type_traits_detail::ReferenceTraits<UnqualifiedType>::value };
+ enum { isPointer = type_traits_detail::PointerTraits<typename type_traits_detail::ReferenceTraits<UnqualifiedType>::type>::value };
+
+ enum { isUnsignedInt = type_traits_detail::IsUnsignedIntegral<UnqualifiedType>::value };
+ enum { isSignedInt = type_traits_detail::IsSignedIntergral<UnqualifiedType>::value };
+ enum { isIntegral = type_traits_detail::IsIntegral<UnqualifiedType>::value };
+ enum { isFloat = type_traits_detail::IsFloat<UnqualifiedType>::value };
+ enum { isArith = isIntegral || isFloat };
+ enum { isVec = type_traits_detail::IsVec<UnqualifiedType>::value };
+
+ typedef typename type_traits_detail::Select<IsSimpleParameter<UnqualifiedType>::value,
+ T, typename type_traits_detail::AddParameterType<T>::type>::type ParameterType;
+ };
+}}}
+
+#endif // __OPENCV_GPU_TYPE_TRAITS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp
new file mode 100644
index 00000000..85e81acf
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/utility.hpp
@@ -0,0 +1,213 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_UTILITY_HPP__
+#define __OPENCV_GPU_UTILITY_HPP__
+
+#include "saturate_cast.hpp"
+#include "datamov_utils.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ #define OPENCV_GPU_LOG_WARP_SIZE (5)
+ #define OPENCV_GPU_WARP_SIZE (1 << OPENCV_GPU_LOG_WARP_SIZE)
+ #define OPENCV_GPU_LOG_MEM_BANKS ((__CUDA_ARCH__ >= 200) ? 5 : 4) // 32 banks on fermi, 16 on tesla
+ #define OPENCV_GPU_MEM_BANKS (1 << OPENCV_GPU_LOG_MEM_BANKS)
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // swap
+
+ template <typename T> void __device__ __host__ __forceinline__ swap(T& a, T& b)
+ {
+ const T temp = a;
+ a = b;
+ b = temp;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Mask Reader
+
+ struct SingleMask
+ {
+ explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {}
+ __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_): mask(mask_.mask){}
+
+ __device__ __forceinline__ bool operator()(int y, int x) const
+ {
+ return mask.ptr(y)[x] != 0;
+ }
+
+ PtrStepb mask;
+ };
+
+ struct SingleMaskChannels
+ {
+ __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_)
+ : mask(mask_), channels(channels_) {}
+ __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_)
+ :mask(mask_.mask), channels(mask_.channels){}
+
+ __device__ __forceinline__ bool operator()(int y, int x) const
+ {
+ return mask.ptr(y)[x / channels] != 0;
+ }
+
+ PtrStepb mask;
+ int channels;
+ };
+
+ struct MaskCollection
+ {
+ explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_)
+ : maskCollection(maskCollection_) {}
+
+ __device__ __forceinline__ MaskCollection(const MaskCollection& masks_)
+ : maskCollection(masks_.maskCollection), curMask(masks_.curMask){}
+
+ __device__ __forceinline__ void next()
+ {
+ curMask = *maskCollection++;
+ }
+ __device__ __forceinline__ void setMask(int z)
+ {
+ curMask = maskCollection[z];
+ }
+
+ __device__ __forceinline__ bool operator()(int y, int x) const
+ {
+ uchar val;
+ return curMask.data == 0 || (ForceGlob<uchar>::Load(curMask.ptr(y), x, val), (val != 0));
+ }
+
+ const PtrStepb* maskCollection;
+ PtrStepb curMask;
+ };
+
+ struct WithOutMask
+ {
+ __host__ __device__ __forceinline__ WithOutMask(){}
+ __host__ __device__ __forceinline__ WithOutMask(const WithOutMask&){}
+
+ __device__ __forceinline__ void next() const
+ {
+ }
+ __device__ __forceinline__ void setMask(int) const
+ {
+ }
+
+ __device__ __forceinline__ bool operator()(int, int) const
+ {
+ return true;
+ }
+
+ __device__ __forceinline__ bool operator()(int, int, int) const
+ {
+ return true;
+ }
+
+ static __device__ __forceinline__ bool check(int, int)
+ {
+ return true;
+ }
+
+ static __device__ __forceinline__ bool check(int, int, int)
+ {
+ return true;
+ }
+ };
+
+ ///////////////////////////////////////////////////////////////////////////////
+ // Solve linear system
+
+ // solve 2x2 linear system Ax=b
+ template <typename T> __device__ __forceinline__ bool solve2x2(const T A[2][2], const T b[2], T x[2])
+ {
+ T det = A[0][0] * A[1][1] - A[1][0] * A[0][1];
+
+ if (det != 0)
+ {
+ double invdet = 1.0 / det;
+
+ x[0] = saturate_cast<T>(invdet * (b[0] * A[1][1] - b[1] * A[0][1]));
+
+ x[1] = saturate_cast<T>(invdet * (A[0][0] * b[1] - A[1][0] * b[0]));
+
+ return true;
+ }
+
+ return false;
+ }
+
+ // solve 3x3 linear system Ax=b
+ template <typename T> __device__ __forceinline__ bool solve3x3(const T A[3][3], const T b[3], T x[3])
+ {
+ T det = A[0][0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1])
+ - A[0][1] * (A[1][0] * A[2][2] - A[1][2] * A[2][0])
+ + A[0][2] * (A[1][0] * A[2][1] - A[1][1] * A[2][0]);
+
+ if (det != 0)
+ {
+ double invdet = 1.0 / det;
+
+ x[0] = saturate_cast<T>(invdet *
+ (b[0] * (A[1][1] * A[2][2] - A[1][2] * A[2][1]) -
+ A[0][1] * (b[1] * A[2][2] - A[1][2] * b[2] ) +
+ A[0][2] * (b[1] * A[2][1] - A[1][1] * b[2] )));
+
+ x[1] = saturate_cast<T>(invdet *
+ (A[0][0] * (b[1] * A[2][2] - A[1][2] * b[2] ) -
+ b[0] * (A[1][0] * A[2][2] - A[1][2] * A[2][0]) +
+ A[0][2] * (A[1][0] * b[2] - b[1] * A[2][0])));
+
+ x[2] = saturate_cast<T>(invdet *
+ (A[0][0] * (A[1][1] * b[2] - b[1] * A[2][1]) -
+ A[0][1] * (A[1][0] * b[2] - b[1] * A[2][0]) +
+ b[0] * (A[1][0] * A[2][1] - A[1][1] * A[2][0])));
+
+ return true;
+ }
+
+ return false;
+ }
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_UTILITY_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp
new file mode 100644
index 00000000..d5b4bb20
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_distance.hpp
@@ -0,0 +1,224 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_VEC_DISTANCE_HPP__
+#define __OPENCV_GPU_VEC_DISTANCE_HPP__
+
+#include "reduce.hpp"
+#include "functional.hpp"
+#include "detail/vec_distance_detail.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ template <typename T> struct L1Dist
+ {
+ typedef int value_type;
+ typedef int result_type;
+
+ __device__ __forceinline__ L1Dist() : mySum(0) {}
+
+ __device__ __forceinline__ void reduceIter(int val1, int val2)
+ {
+ mySum = __sad(val1, val2, mySum);
+ }
+
+ template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(int* smem, int tid)
+ {
+ reduce<THREAD_DIM>(smem, mySum, tid, plus<int>());
+ }
+
+ __device__ __forceinline__ operator int() const
+ {
+ return mySum;
+ }
+
+ int mySum;
+ };
+ template <> struct L1Dist<float>
+ {
+ typedef float value_type;
+ typedef float result_type;
+
+ __device__ __forceinline__ L1Dist() : mySum(0.0f) {}
+
+ __device__ __forceinline__ void reduceIter(float val1, float val2)
+ {
+ mySum += ::fabs(val1 - val2);
+ }
+
+ template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(float* smem, int tid)
+ {
+ reduce<THREAD_DIM>(smem, mySum, tid, plus<float>());
+ }
+
+ __device__ __forceinline__ operator float() const
+ {
+ return mySum;
+ }
+
+ float mySum;
+ };
+
+ struct L2Dist
+ {
+ typedef float value_type;
+ typedef float result_type;
+
+ __device__ __forceinline__ L2Dist() : mySum(0.0f) {}
+
+ __device__ __forceinline__ void reduceIter(float val1, float val2)
+ {
+ float reg = val1 - val2;
+ mySum += reg * reg;
+ }
+
+ template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(float* smem, int tid)
+ {
+ reduce<THREAD_DIM>(smem, mySum, tid, plus<float>());
+ }
+
+ __device__ __forceinline__ operator float() const
+ {
+ return sqrtf(mySum);
+ }
+
+ float mySum;
+ };
+
+ struct HammingDist
+ {
+ typedef int value_type;
+ typedef int result_type;
+
+ __device__ __forceinline__ HammingDist() : mySum(0) {}
+
+ __device__ __forceinline__ void reduceIter(int val1, int val2)
+ {
+ mySum += __popc(val1 ^ val2);
+ }
+
+ template <int THREAD_DIM> __device__ __forceinline__ void reduceAll(int* smem, int tid)
+ {
+ reduce<THREAD_DIM>(smem, mySum, tid, plus<int>());
+ }
+
+ __device__ __forceinline__ operator int() const
+ {
+ return mySum;
+ }
+
+ int mySum;
+ };
+
+ // calc distance between two vectors in global memory
+ template <int THREAD_DIM, typename Dist, typename T1, typename T2>
+ __device__ void calcVecDiffGlobal(const T1* vec1, const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid)
+ {
+ for (int i = tid; i < len; i += THREAD_DIM)
+ {
+ T1 val1;
+ ForceGlob<T1>::Load(vec1, i, val1);
+
+ T2 val2;
+ ForceGlob<T2>::Load(vec2, i, val2);
+
+ dist.reduceIter(val1, val2);
+ }
+
+ dist.reduceAll<THREAD_DIM>(smem, tid);
+ }
+
+ // calc distance between two vectors, first vector is cached in register or shared memory, second vector is in global memory
+ template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename Dist, typename T1, typename T2>
+ __device__ __forceinline__ void calcVecDiffCached(const T1* vecCached, const T2* vecGlob, int len, Dist& dist, typename Dist::result_type* smem, int tid)
+ {
+ vec_distance_detail::VecDiffCachedCalculator<THREAD_DIM, MAX_LEN, LEN_EQ_MAX_LEN>::calc(vecCached, vecGlob, len, dist, tid);
+
+ dist.reduceAll<THREAD_DIM>(smem, tid);
+ }
+
+ // calc distance between two vectors in global memory
+ template <int THREAD_DIM, typename T1> struct VecDiffGlobal
+ {
+ explicit __device__ __forceinline__ VecDiffGlobal(const T1* vec1_, int = 0, void* = 0, int = 0, int = 0)
+ {
+ vec1 = vec1_;
+ }
+
+ template <typename T2, typename Dist>
+ __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const
+ {
+ calcVecDiffGlobal<THREAD_DIM>(vec1, vec2, len, dist, smem, tid);
+ }
+
+ const T1* vec1;
+ };
+
+ // calc distance between two vectors, first vector is cached in register memory, second vector is in global memory
+ template <int THREAD_DIM, int MAX_LEN, bool LEN_EQ_MAX_LEN, typename U> struct VecDiffCachedRegister
+ {
+ template <typename T1> __device__ __forceinline__ VecDiffCachedRegister(const T1* vec1, int len, U* smem, int glob_tid, int tid)
+ {
+ if (glob_tid < len)
+ smem[glob_tid] = vec1[glob_tid];
+ __syncthreads();
+
+ U* vec1ValsPtr = vec1Vals;
+
+ #pragma unroll
+ for (int i = tid; i < MAX_LEN; i += THREAD_DIM)
+ *vec1ValsPtr++ = smem[i];
+
+ __syncthreads();
+ }
+
+ template <typename T2, typename Dist>
+ __device__ __forceinline__ void calc(const T2* vec2, int len, Dist& dist, typename Dist::result_type* smem, int tid) const
+ {
+ calcVecDiffCached<THREAD_DIM, MAX_LEN, LEN_EQ_MAX_LEN>(vec1Vals, vec2, len, dist, smem, tid);
+ }
+
+ U vec1Vals[MAX_LEN / THREAD_DIM];
+ };
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_VEC_DISTANCE_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp
new file mode 100644
index 00000000..a6cb43a2
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_math.hpp
@@ -0,0 +1,922 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_VECMATH_HPP__
+#define __OPENCV_GPU_VECMATH_HPP__
+
+#include "vec_traits.hpp"
+#include "saturate_cast.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+
+// saturate_cast
+
+namespace vec_math_detail
+{
+ template <int cn, typename VecD> struct SatCastHelper;
+ template <typename VecD> struct SatCastHelper<1, VecD>
+ {
+ template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
+ {
+ typedef typename VecTraits<VecD>::elem_type D;
+ return VecTraits<VecD>::make(saturate_cast<D>(v.x));
+ }
+ };
+ template <typename VecD> struct SatCastHelper<2, VecD>
+ {
+ template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
+ {
+ typedef typename VecTraits<VecD>::elem_type D;
+ return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y));
+ }
+ };
+ template <typename VecD> struct SatCastHelper<3, VecD>
+ {
+ template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
+ {
+ typedef typename VecTraits<VecD>::elem_type D;
+ return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y), saturate_cast<D>(v.z));
+ }
+ };
+ template <typename VecD> struct SatCastHelper<4, VecD>
+ {
+ template <typename VecS> static __device__ __forceinline__ VecD cast(const VecS& v)
+ {
+ typedef typename VecTraits<VecD>::elem_type D;
+ return VecTraits<VecD>::make(saturate_cast<D>(v.x), saturate_cast<D>(v.y), saturate_cast<D>(v.z), saturate_cast<D>(v.w));
+ }
+ };
+
+ template <typename VecD, typename VecS> static __device__ __forceinline__ VecD saturate_cast_helper(const VecS& v)
+ {
+ return SatCastHelper<VecTraits<VecD>::cn, VecD>::cast(v);
+ }
+}
+
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const char1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const short1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uint1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const int1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const float1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const double1& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const char2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const short2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uint2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const int2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const float2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const double2& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const char3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const short3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uint3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const int3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const float3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const double3& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uchar4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const char4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const ushort4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const short4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const uint4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const int4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const float4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+template<typename T> static __device__ __forceinline__ T saturate_cast(const double4& v) {return vec_math_detail::saturate_cast_helper<T>(v);}
+
+// unary operators
+
+#define CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(op, input_type, output_type) \
+ __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a) \
+ { \
+ return VecTraits<output_type ## 1>::make(op (a.x)); \
+ } \
+ __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a) \
+ { \
+ return VecTraits<output_type ## 2>::make(op (a.x), op (a.y)); \
+ } \
+ __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a) \
+ { \
+ return VecTraits<output_type ## 3>::make(op (a.x), op (a.y), op (a.z)); \
+ } \
+ __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a) \
+ { \
+ return VecTraits<output_type ## 4>::make(op (a.x), op (a.y), op (a.z), op (a.w)); \
+ }
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, char, char)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, short, short)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, int, int)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(-, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(!, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, char, char)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, ushort, ushort)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, short, short)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, int, int)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_OP(~, uint, uint)
+
+#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_OP
+
+// unary functions
+
+#define CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(func_name, func, input_type, output_type) \
+ __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a) \
+ { \
+ return VecTraits<output_type ## 1>::make(func (a.x)); \
+ } \
+ __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a) \
+ { \
+ return VecTraits<output_type ## 2>::make(func (a.x), func (a.y)); \
+ } \
+ __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a) \
+ { \
+ return VecTraits<output_type ## 3>::make(func (a.x), func (a.y), func (a.z)); \
+ } \
+ __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a) \
+ { \
+ return VecTraits<output_type ## 4>::make(func (a.x), func (a.y), func (a.z), func (a.w)); \
+ }
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, char, char)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, ushort, ushort)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, short, short)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, int, int)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uint, uint)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabsf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabs, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrt, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::expf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp, ::exp, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2f, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp2, ::exp2, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10f, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(exp10, ::exp10, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::logf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log, ::log, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2f, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log2, ::log2, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10f, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(log10, ::log10, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sinf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sin, ::sin, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cosf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cos, ::cos, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tanf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tan, ::tan, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asinf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asin, ::asin, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acosf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acos, ::acos, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atanf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atan, ::atan, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinhf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sinh, ::sinh, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::coshf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(cosh, ::cosh, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanhf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(tanh, ::tanh, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinhf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(asinh, ::asinh, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acoshf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(acosh, ::acosh, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanhf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(atanh, ::atanh, double, double)
+
+#undef CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC
+
+// binary operators (vec & vec)
+
+#define CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(op, input_type, output_type) \
+ __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, const input_type ## 1 & b) \
+ { \
+ return VecTraits<output_type ## 1>::make(a.x op b.x); \
+ } \
+ __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, const input_type ## 2 & b) \
+ { \
+ return VecTraits<output_type ## 2>::make(a.x op b.x, a.y op b.y); \
+ } \
+ __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, const input_type ## 3 & b) \
+ { \
+ return VecTraits<output_type ## 3>::make(a.x op b.x, a.y op b.y, a.z op b.z); \
+ } \
+ __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, const input_type ## 4 & b) \
+ { \
+ return VecTraits<output_type ## 4>::make(a.x op b.x, a.y op b.y, a.z op b.z, a.w op b.w); \
+ }
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uchar, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, char, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, ushort, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, short, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, uint, uint)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(+, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uchar, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, char, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, ushort, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, short, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, uint, uint)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(-, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uchar, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, char, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, ushort, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, short, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, uint, uint)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(*, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uchar, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, char, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, ushort, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, short, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, uint, uint)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(/, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(==, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(!=, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(>=, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(<=, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&&, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, char, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, ushort, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, short, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, int, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, uint, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, float, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(||, double, uchar)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, char, char)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, ushort, ushort)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, short, short)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(&, uint, uint)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, char, char)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, ushort, ushort)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, short, short)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(|, uint, uint)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, char, char)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, ushort, ushort)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, short, short)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_OP(^, uint, uint)
+
+#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_OP
+
+// binary operators (vec & scalar)
+
+#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(op, input_type, scalar_type, output_type) \
+ __device__ __forceinline__ output_type ## 1 operator op(const input_type ## 1 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 1>::make(a.x op s); \
+ } \
+ __device__ __forceinline__ output_type ## 1 operator op(scalar_type s, const input_type ## 1 & b) \
+ { \
+ return VecTraits<output_type ## 1>::make(s op b.x); \
+ } \
+ __device__ __forceinline__ output_type ## 2 operator op(const input_type ## 2 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 2>::make(a.x op s, a.y op s); \
+ } \
+ __device__ __forceinline__ output_type ## 2 operator op(scalar_type s, const input_type ## 2 & b) \
+ { \
+ return VecTraits<output_type ## 2>::make(s op b.x, s op b.y); \
+ } \
+ __device__ __forceinline__ output_type ## 3 operator op(const input_type ## 3 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 3>::make(a.x op s, a.y op s, a.z op s); \
+ } \
+ __device__ __forceinline__ output_type ## 3 operator op(scalar_type s, const input_type ## 3 & b) \
+ { \
+ return VecTraits<output_type ## 3>::make(s op b.x, s op b.y, s op b.z); \
+ } \
+ __device__ __forceinline__ output_type ## 4 operator op(const input_type ## 4 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 4>::make(a.x op s, a.y op s, a.z op s, a.w op s); \
+ } \
+ __device__ __forceinline__ output_type ## 4 operator op(scalar_type s, const input_type ## 4 & b) \
+ { \
+ return VecTraits<output_type ## 4>::make(s op b.x, s op b.y, s op b.z, s op b.w); \
+ }
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, uint, uint)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(+, double, double, double)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, uint, uint)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(-, double, double, double)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, uint, uint)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(*, double, double, double)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, uint, uint)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(/, double, double, double)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(==, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(!=, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(>=, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(<=, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&&, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, char, char, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, ushort, ushort, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, short, short, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, int, int, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, uint, uint, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, float, float, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(||, double, double, uchar)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, char, char, char)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, ushort, ushort, ushort)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, short, short, short)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(&, uint, uint, uint)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, char, char, char)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, ushort, ushort, ushort)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, short, short, short)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(|, uint, uint, uint)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, char, char, char)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, ushort, ushort, ushort)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, short, short, short)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP(^, uint, uint, uint)
+
+#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_OP
+
+// binary function (vec & vec)
+
+#define CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(func_name, func, input_type, output_type) \
+ __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, const input_type ## 1 & b) \
+ { \
+ return VecTraits<output_type ## 1>::make(func (a.x, b.x)); \
+ } \
+ __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, const input_type ## 2 & b) \
+ { \
+ return VecTraits<output_type ## 2>::make(func (a.x, b.x), func (a.y, b.y)); \
+ } \
+ __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, const input_type ## 3 & b) \
+ { \
+ return VecTraits<output_type ## 3>::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z)); \
+ } \
+ __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, const input_type ## 4 & b) \
+ { \
+ return VecTraits<output_type ## 4>::make(func (a.x, b.x), func (a.y, b.y), func (a.z, b.z), func (a.w, b.w)); \
+ }
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, char, char)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, ushort, ushort)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, short, short)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, uint, uint)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::max, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmaxf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(max, ::fmax, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uchar, uchar)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, char, char)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, ushort, ushort)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, short, short)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, uint, uint)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::min, int, int)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fminf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(min, ::fmin, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, char, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, short, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, int, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypotf, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(hypot, ::hypot, double, double)
+
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uchar, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, char, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, ushort, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, short, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, uint, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, int, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2f, float, float)
+CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC(atan2, ::atan2, double, double)
+
+#undef CV_CUDEV_IMPLEMENT_VEC_BINARY_FUNC
+
+// binary function (vec & scalar)
+
+#define CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(func_name, func, input_type, scalar_type, output_type) \
+ __device__ __forceinline__ output_type ## 1 func_name(const input_type ## 1 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 1>::make(func ((output_type) a.x, (output_type) s)); \
+ } \
+ __device__ __forceinline__ output_type ## 1 func_name(scalar_type s, const input_type ## 1 & b) \
+ { \
+ return VecTraits<output_type ## 1>::make(func ((output_type) s, (output_type) b.x)); \
+ } \
+ __device__ __forceinline__ output_type ## 2 func_name(const input_type ## 2 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 2>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s)); \
+ } \
+ __device__ __forceinline__ output_type ## 2 func_name(scalar_type s, const input_type ## 2 & b) \
+ { \
+ return VecTraits<output_type ## 2>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y)); \
+ } \
+ __device__ __forceinline__ output_type ## 3 func_name(const input_type ## 3 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 3>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s)); \
+ } \
+ __device__ __forceinline__ output_type ## 3 func_name(scalar_type s, const input_type ## 3 & b) \
+ { \
+ return VecTraits<output_type ## 3>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z)); \
+ } \
+ __device__ __forceinline__ output_type ## 4 func_name(const input_type ## 4 & a, scalar_type s) \
+ { \
+ return VecTraits<output_type ## 4>::make(func ((output_type) a.x, (output_type) s), func ((output_type) a.y, (output_type) s), func ((output_type) a.z, (output_type) s), func ((output_type) a.w, (output_type) s)); \
+ } \
+ __device__ __forceinline__ output_type ## 4 func_name(scalar_type s, const input_type ## 4 & b) \
+ { \
+ return VecTraits<output_type ## 4>::make(func ((output_type) s, (output_type) b.x), func ((output_type) s, (output_type) b.y), func ((output_type) s, (output_type) b.z), func ((output_type) s, (output_type) b.w)); \
+ }
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, char, char, char)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, ushort, ushort, ushort)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, short, short, short)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, uint, uint, uint)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::max, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmaxf, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(max, ::fmax, double, double, double)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uchar, uchar, uchar)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, char, char, char)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, ushort, ushort, ushort)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, short, short, short)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, uint, uint, uint)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::min, int, int, int)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fminf, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(min, ::fmin, double, double, double)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypotf, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(hypot, ::hypot, double, double, double)
+
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uchar, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uchar, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, char, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, char, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, ushort, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, ushort, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, short, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, short, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, uint, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, uint, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, int, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, int, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2f, float, float, float)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, float, double, double)
+CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC(atan2, ::atan2, double, double, double)
+
+#undef CV_CUDEV_IMPLEMENT_SCALAR_BINARY_FUNC
+
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_VECMATH_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp
new file mode 100644
index 00000000..8d179c83
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/vec_traits.hpp
@@ -0,0 +1,280 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_VEC_TRAITS_HPP__
+#define __OPENCV_GPU_VEC_TRAITS_HPP__
+
+#include "common.hpp"
+
+namespace cv { namespace gpu { namespace device
+{
+ template<typename T, int N> struct TypeVec;
+
+ struct __align__(8) uchar8
+ {
+ uchar a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ uchar8 make_uchar8(uchar a0, uchar a1, uchar a2, uchar a3, uchar a4, uchar a5, uchar a6, uchar a7)
+ {
+ uchar8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+ struct __align__(8) char8
+ {
+ schar a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ char8 make_char8(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7)
+ {
+ char8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+ struct __align__(16) ushort8
+ {
+ ushort a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ ushort8 make_ushort8(ushort a0, ushort a1, ushort a2, ushort a3, ushort a4, ushort a5, ushort a6, ushort a7)
+ {
+ ushort8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+ struct __align__(16) short8
+ {
+ short a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ short8 make_short8(short a0, short a1, short a2, short a3, short a4, short a5, short a6, short a7)
+ {
+ short8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+ struct __align__(32) uint8
+ {
+ uint a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ uint8 make_uint8(uint a0, uint a1, uint a2, uint a3, uint a4, uint a5, uint a6, uint a7)
+ {
+ uint8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+ struct __align__(32) int8
+ {
+ int a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ int8 make_int8(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7)
+ {
+ int8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+ struct __align__(32) float8
+ {
+ float a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ float8 make_float8(float a0, float a1, float a2, float a3, float a4, float a5, float a6, float a7)
+ {
+ float8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+ struct double8
+ {
+ double a0, a1, a2, a3, a4, a5, a6, a7;
+ };
+ static __host__ __device__ __forceinline__ double8 make_double8(double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7)
+ {
+ double8 val = {a0, a1, a2, a3, a4, a5, a6, a7};
+ return val;
+ }
+
+#define OPENCV_GPU_IMPLEMENT_TYPE_VEC(type) \
+ template<> struct TypeVec<type, 1> { typedef type vec_type; }; \
+ template<> struct TypeVec<type ## 1, 1> { typedef type ## 1 vec_type; }; \
+ template<> struct TypeVec<type, 2> { typedef type ## 2 vec_type; }; \
+ template<> struct TypeVec<type ## 2, 2> { typedef type ## 2 vec_type; }; \
+ template<> struct TypeVec<type, 3> { typedef type ## 3 vec_type; }; \
+ template<> struct TypeVec<type ## 3, 3> { typedef type ## 3 vec_type; }; \
+ template<> struct TypeVec<type, 4> { typedef type ## 4 vec_type; }; \
+ template<> struct TypeVec<type ## 4, 4> { typedef type ## 4 vec_type; }; \
+ template<> struct TypeVec<type, 8> { typedef type ## 8 vec_type; }; \
+ template<> struct TypeVec<type ## 8, 8> { typedef type ## 8 vec_type; };
+
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(uchar)
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(char)
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(ushort)
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(short)
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(int)
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(uint)
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(float)
+ OPENCV_GPU_IMPLEMENT_TYPE_VEC(double)
+
+ #undef OPENCV_GPU_IMPLEMENT_TYPE_VEC
+
+ template<> struct TypeVec<schar, 1> { typedef schar vec_type; };
+ template<> struct TypeVec<schar, 2> { typedef char2 vec_type; };
+ template<> struct TypeVec<schar, 3> { typedef char3 vec_type; };
+ template<> struct TypeVec<schar, 4> { typedef char4 vec_type; };
+ template<> struct TypeVec<schar, 8> { typedef char8 vec_type; };
+
+ template<> struct TypeVec<bool, 1> { typedef uchar vec_type; };
+ template<> struct TypeVec<bool, 2> { typedef uchar2 vec_type; };
+ template<> struct TypeVec<bool, 3> { typedef uchar3 vec_type; };
+ template<> struct TypeVec<bool, 4> { typedef uchar4 vec_type; };
+ template<> struct TypeVec<bool, 8> { typedef uchar8 vec_type; };
+
+ template<typename T> struct VecTraits;
+
+#define OPENCV_GPU_IMPLEMENT_VEC_TRAITS(type) \
+ template<> struct VecTraits<type> \
+ { \
+ typedef type elem_type; \
+ enum {cn=1}; \
+ static __device__ __host__ __forceinline__ type all(type v) {return v;} \
+ static __device__ __host__ __forceinline__ type make(type x) {return x;} \
+ static __device__ __host__ __forceinline__ type make(const type* v) {return *v;} \
+ }; \
+ template<> struct VecTraits<type ## 1> \
+ { \
+ typedef type elem_type; \
+ enum {cn=1}; \
+ static __device__ __host__ __forceinline__ type ## 1 all(type v) {return make_ ## type ## 1(v);} \
+ static __device__ __host__ __forceinline__ type ## 1 make(type x) {return make_ ## type ## 1(x);} \
+ static __device__ __host__ __forceinline__ type ## 1 make(const type* v) {return make_ ## type ## 1(*v);} \
+ }; \
+ template<> struct VecTraits<type ## 2> \
+ { \
+ typedef type elem_type; \
+ enum {cn=2}; \
+ static __device__ __host__ __forceinline__ type ## 2 all(type v) {return make_ ## type ## 2(v, v);} \
+ static __device__ __host__ __forceinline__ type ## 2 make(type x, type y) {return make_ ## type ## 2(x, y);} \
+ static __device__ __host__ __forceinline__ type ## 2 make(const type* v) {return make_ ## type ## 2(v[0], v[1]);} \
+ }; \
+ template<> struct VecTraits<type ## 3> \
+ { \
+ typedef type elem_type; \
+ enum {cn=3}; \
+ static __device__ __host__ __forceinline__ type ## 3 all(type v) {return make_ ## type ## 3(v, v, v);} \
+ static __device__ __host__ __forceinline__ type ## 3 make(type x, type y, type z) {return make_ ## type ## 3(x, y, z);} \
+ static __device__ __host__ __forceinline__ type ## 3 make(const type* v) {return make_ ## type ## 3(v[0], v[1], v[2]);} \
+ }; \
+ template<> struct VecTraits<type ## 4> \
+ { \
+ typedef type elem_type; \
+ enum {cn=4}; \
+ static __device__ __host__ __forceinline__ type ## 4 all(type v) {return make_ ## type ## 4(v, v, v, v);} \
+ static __device__ __host__ __forceinline__ type ## 4 make(type x, type y, type z, type w) {return make_ ## type ## 4(x, y, z, w);} \
+ static __device__ __host__ __forceinline__ type ## 4 make(const type* v) {return make_ ## type ## 4(v[0], v[1], v[2], v[3]);} \
+ }; \
+ template<> struct VecTraits<type ## 8> \
+ { \
+ typedef type elem_type; \
+ enum {cn=8}; \
+ static __device__ __host__ __forceinline__ type ## 8 all(type v) {return make_ ## type ## 8(v, v, v, v, v, v, v, v);} \
+ static __device__ __host__ __forceinline__ type ## 8 make(type a0, type a1, type a2, type a3, type a4, type a5, type a6, type a7) {return make_ ## type ## 8(a0, a1, a2, a3, a4, a5, a6, a7);} \
+ static __device__ __host__ __forceinline__ type ## 8 make(const type* v) {return make_ ## type ## 8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);} \
+ };
+
+ OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uchar)
+ OPENCV_GPU_IMPLEMENT_VEC_TRAITS(ushort)
+ OPENCV_GPU_IMPLEMENT_VEC_TRAITS(short)
+ OPENCV_GPU_IMPLEMENT_VEC_TRAITS(int)
+ OPENCV_GPU_IMPLEMENT_VEC_TRAITS(uint)
+ OPENCV_GPU_IMPLEMENT_VEC_TRAITS(float)
+ OPENCV_GPU_IMPLEMENT_VEC_TRAITS(double)
+
+ #undef OPENCV_GPU_IMPLEMENT_VEC_TRAITS
+
+ template<> struct VecTraits<char>
+ {
+ typedef char elem_type;
+ enum {cn=1};
+ static __device__ __host__ __forceinline__ char all(char v) {return v;}
+ static __device__ __host__ __forceinline__ char make(char x) {return x;}
+ static __device__ __host__ __forceinline__ char make(const char* x) {return *x;}
+ };
+ template<> struct VecTraits<schar>
+ {
+ typedef schar elem_type;
+ enum {cn=1};
+ static __device__ __host__ __forceinline__ schar all(schar v) {return v;}
+ static __device__ __host__ __forceinline__ schar make(schar x) {return x;}
+ static __device__ __host__ __forceinline__ schar make(const schar* x) {return *x;}
+ };
+ template<> struct VecTraits<char1>
+ {
+ typedef schar elem_type;
+ enum {cn=1};
+ static __device__ __host__ __forceinline__ char1 all(schar v) {return make_char1(v);}
+ static __device__ __host__ __forceinline__ char1 make(schar x) {return make_char1(x);}
+ static __device__ __host__ __forceinline__ char1 make(const schar* v) {return make_char1(v[0]);}
+ };
+ template<> struct VecTraits<char2>
+ {
+ typedef schar elem_type;
+ enum {cn=2};
+ static __device__ __host__ __forceinline__ char2 all(schar v) {return make_char2(v, v);}
+ static __device__ __host__ __forceinline__ char2 make(schar x, schar y) {return make_char2(x, y);}
+ static __device__ __host__ __forceinline__ char2 make(const schar* v) {return make_char2(v[0], v[1]);}
+ };
+ template<> struct VecTraits<char3>
+ {
+ typedef schar elem_type;
+ enum {cn=3};
+ static __device__ __host__ __forceinline__ char3 all(schar v) {return make_char3(v, v, v);}
+ static __device__ __host__ __forceinline__ char3 make(schar x, schar y, schar z) {return make_char3(x, y, z);}
+ static __device__ __host__ __forceinline__ char3 make(const schar* v) {return make_char3(v[0], v[1], v[2]);}
+ };
+ template<> struct VecTraits<char4>
+ {
+ typedef schar elem_type;
+ enum {cn=4};
+ static __device__ __host__ __forceinline__ char4 all(schar v) {return make_char4(v, v, v, v);}
+ static __device__ __host__ __forceinline__ char4 make(schar x, schar y, schar z, schar w) {return make_char4(x, y, z, w);}
+ static __device__ __host__ __forceinline__ char4 make(const schar* v) {return make_char4(v[0], v[1], v[2], v[3]);}
+ };
+ template<> struct VecTraits<char8>
+ {
+ typedef schar elem_type;
+ enum {cn=8};
+ static __device__ __host__ __forceinline__ char8 all(schar v) {return make_char8(v, v, v, v, v, v, v, v);}
+ static __device__ __host__ __forceinline__ char8 make(schar a0, schar a1, schar a2, schar a3, schar a4, schar a5, schar a6, schar a7) {return make_char8(a0, a1, a2, a3, a4, a5, a6, a7);}
+ static __device__ __host__ __forceinline__ char8 make(const schar* v) {return make_char8(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);}
+ };
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif // __OPENCV_GPU_VEC_TRAITS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp
new file mode 100644
index 00000000..0f1dc794
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp.hpp
@@ -0,0 +1,131 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_DEVICE_WARP_HPP__
+#define __OPENCV_GPU_DEVICE_WARP_HPP__
+
+namespace cv { namespace gpu { namespace device
+{
+ struct Warp
+ {
+ enum
+ {
+ LOG_WARP_SIZE = 5,
+ WARP_SIZE = 1 << LOG_WARP_SIZE,
+ STRIDE = WARP_SIZE
+ };
+
+ /** \brief Returns the warp lane ID of the calling thread. */
+ static __device__ __forceinline__ unsigned int laneId()
+ {
+ unsigned int ret;
+ asm("mov.u32 %0, %laneid;" : "=r"(ret) );
+ return ret;
+ }
+
+ template<typename It, typename T>
+ static __device__ __forceinline__ void fill(It beg, It end, const T& value)
+ {
+ for(It t = beg + laneId(); t < end; t += STRIDE)
+ *t = value;
+ }
+
+ template<typename InIt, typename OutIt>
+ static __device__ __forceinline__ OutIt copy(InIt beg, InIt end, OutIt out)
+ {
+ for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE)
+ *out = *t;
+ return out;
+ }
+
+ template<typename InIt, typename OutIt, class UnOp>
+ static __device__ __forceinline__ OutIt transform(InIt beg, InIt end, OutIt out, UnOp op)
+ {
+ for(InIt t = beg + laneId(); t < end; t += STRIDE, out += STRIDE)
+ *out = op(*t);
+ return out;
+ }
+
+ template<typename InIt1, typename InIt2, typename OutIt, class BinOp>
+ static __device__ __forceinline__ OutIt transform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, BinOp op)
+ {
+ unsigned int lane = laneId();
+
+ InIt1 t1 = beg1 + lane;
+ InIt2 t2 = beg2 + lane;
+ for(; t1 < end1; t1 += STRIDE, t2 += STRIDE, out += STRIDE)
+ *out = op(*t1, *t2);
+ return out;
+ }
+
+ template <class T, class BinOp>
+ static __device__ __forceinline__ T reduce(volatile T *ptr, BinOp op)
+ {
+ const unsigned int lane = laneId();
+
+ if (lane < 16)
+ {
+ T partial = ptr[lane];
+
+ ptr[lane] = partial = op(partial, ptr[lane + 16]);
+ ptr[lane] = partial = op(partial, ptr[lane + 8]);
+ ptr[lane] = partial = op(partial, ptr[lane + 4]);
+ ptr[lane] = partial = op(partial, ptr[lane + 2]);
+ ptr[lane] = partial = op(partial, ptr[lane + 1]);
+ }
+
+ return *ptr;
+ }
+
+ template<typename OutIt, typename T>
+ static __device__ __forceinline__ void yota(OutIt beg, OutIt end, T value)
+ {
+ unsigned int lane = laneId();
+ value += lane;
+
+ for(OutIt t = beg + lane; t < end; t += STRIDE, value += STRIDE)
+ *t = value;
+ }
+ };
+}}} // namespace cv { namespace gpu { namespace device
+
+#endif /* __OPENCV_GPU_DEVICE_WARP_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp
new file mode 100644
index 00000000..d4e64c46
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_reduce.hpp
@@ -0,0 +1,68 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef OPENCV_GPU_WARP_REDUCE_HPP__
+#define OPENCV_GPU_WARP_REDUCE_HPP__
+
+namespace cv { namespace gpu { namespace device
+{
+ template <class T>
+ __device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x)
+ {
+ const unsigned int lane = tid & 31; // index of thread in warp (0..31)
+
+ if (lane < 16)
+ {
+ T partial = ptr[tid];
+
+ ptr[tid] = partial = partial + ptr[tid + 16];
+ ptr[tid] = partial = partial + ptr[tid + 8];
+ ptr[tid] = partial = partial + ptr[tid + 4];
+ ptr[tid] = partial = partial + ptr[tid + 2];
+ ptr[tid] = partial = partial + ptr[tid + 1];
+ }
+
+ return ptr[tid - lane];
+ }
+}}} // namespace cv { namespace gpu { namespace device {
+
+#endif /* OPENCV_GPU_WARP_REDUCE_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp
new file mode 100644
index 00000000..8b4479a7
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/device/warp_shuffle.hpp
@@ -0,0 +1,145 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_WARP_SHUFFLE_HPP__
+#define __OPENCV_GPU_WARP_SHUFFLE_HPP__
+
+namespace cv { namespace gpu { namespace device
+{
+ template <typename T>
+ __device__ __forceinline__ T shfl(T val, int srcLane, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ return __shfl(val, srcLane, width);
+ #else
+ return T();
+ #endif
+ }
+ __device__ __forceinline__ unsigned int shfl(unsigned int val, int srcLane, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ return (unsigned int) __shfl((int) val, srcLane, width);
+ #else
+ return 0;
+ #endif
+ }
+ __device__ __forceinline__ double shfl(double val, int srcLane, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ int lo = __double2loint(val);
+ int hi = __double2hiint(val);
+
+ lo = __shfl(lo, srcLane, width);
+ hi = __shfl(hi, srcLane, width);
+
+ return __hiloint2double(hi, lo);
+ #else
+ return 0.0;
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T shfl_down(T val, unsigned int delta, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ return __shfl_down(val, delta, width);
+ #else
+ return T();
+ #endif
+ }
+ __device__ __forceinline__ unsigned int shfl_down(unsigned int val, unsigned int delta, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ return (unsigned int) __shfl_down((int) val, delta, width);
+ #else
+ return 0;
+ #endif
+ }
+ __device__ __forceinline__ double shfl_down(double val, unsigned int delta, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ int lo = __double2loint(val);
+ int hi = __double2hiint(val);
+
+ lo = __shfl_down(lo, delta, width);
+ hi = __shfl_down(hi, delta, width);
+
+ return __hiloint2double(hi, lo);
+ #else
+ return 0.0;
+ #endif
+ }
+
+ template <typename T>
+ __device__ __forceinline__ T shfl_up(T val, unsigned int delta, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ return __shfl_up(val, delta, width);
+ #else
+ return T();
+ #endif
+ }
+ __device__ __forceinline__ unsigned int shfl_up(unsigned int val, unsigned int delta, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ return (unsigned int) __shfl_up((int) val, delta, width);
+ #else
+ return 0;
+ #endif
+ }
+ __device__ __forceinline__ double shfl_up(double val, unsigned int delta, int width = warpSize)
+ {
+ #if __CUDA_ARCH__ >= 300
+ int lo = __double2loint(val);
+ int hi = __double2hiint(val);
+
+ lo = __shfl_up(lo, delta, width);
+ hi = __shfl_up(hi, delta, width);
+
+ return __hiloint2double(hi, lo);
+ #else
+ return 0.0;
+ #endif
+ }
+}}}
+
+#endif // __OPENCV_GPU_WARP_SHUFFLE_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp
new file mode 100644
index 00000000..18dfcd8a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/devmem2d.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/core/cuda_devptrs.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp
new file mode 100644
index 00000000..de169826
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpu.hpp
@@ -0,0 +1,2530 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_HPP__
+#define __OPENCV_GPU_HPP__
+
+#ifndef SKIP_INCLUDES
+#include <vector>
+#include <memory>
+#include <iosfwd>
+#endif
+
+#include "opencv2/core/gpumat.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/objdetect/objdetect.hpp"
+#include "opencv2/features2d/features2d.hpp"
+
+namespace cv { namespace gpu {
+
+//////////////////////////////// CudaMem ////////////////////////////////
+// CudaMem is limited cv::Mat with page locked memory allocation.
+// Page locked memory is only needed for async and faster coping to GPU.
+// It is convertable to cv::Mat header without reference counting
+// so you can use it with other opencv functions.
+
+// Page-locks the matrix m memory and maps it for the device(s)
+CV_EXPORTS void registerPageLocked(Mat& m);
+// Unmaps the memory of matrix m, and makes it pageable again.
+CV_EXPORTS void unregisterPageLocked(Mat& m);
+
+class CV_EXPORTS CudaMem
+{
+public:
+ enum { ALLOC_PAGE_LOCKED = 1, ALLOC_ZEROCOPY = 2, ALLOC_WRITE_COMBINED = 4 };
+
+ CudaMem();
+ CudaMem(const CudaMem& m);
+
+ CudaMem(int rows, int cols, int type, int _alloc_type = ALLOC_PAGE_LOCKED);
+ CudaMem(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);
+
+
+ //! creates from cv::Mat with coping data
+ explicit CudaMem(const Mat& m, int alloc_type = ALLOC_PAGE_LOCKED);
+
+ ~CudaMem();
+
+ CudaMem& operator = (const CudaMem& m);
+
+ //! returns deep copy of the matrix, i.e. the data is copied
+ CudaMem clone() const;
+
+ //! allocates new matrix data unless the matrix already has specified size and type.
+ void create(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED);
+ void create(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);
+
+ //! decrements reference counter and released memory if needed.
+ void release();
+
+ //! returns matrix header with disabled reference counting for CudaMem data.
+ Mat createMatHeader() const;
+ operator Mat() const;
+
+ //! maps host memory into device address space and returns GpuMat header for it. Throws exception if not supported by hardware.
+ GpuMat createGpuMatHeader() const;
+ operator GpuMat() const;
+
+ //returns if host memory can be mapperd to gpu address space;
+ static bool canMapHostMemory();
+
+ // Please see cv::Mat for descriptions
+ bool isContinuous() const;
+ size_t elemSize() const;
+ size_t elemSize1() const;
+ int type() const;
+ int depth() const;
+ int channels() const;
+ size_t step1() const;
+ Size size() const;
+ bool empty() const;
+
+
+ // Please see cv::Mat for descriptions
+ int flags;
+ int rows, cols;
+ size_t step;
+
+ uchar* data;
+ int* refcount;
+
+ uchar* datastart;
+ uchar* dataend;
+
+ int alloc_type;
+};
+
+//////////////////////////////// CudaStream ////////////////////////////////
+// Encapculates Cuda Stream. Provides interface for async coping.
+// Passed to each function that supports async kernel execution.
+// Reference counting is enabled
+
+class CV_EXPORTS Stream
+{
+public:
+ Stream();
+ ~Stream();
+
+ Stream(const Stream&);
+ Stream& operator =(const Stream&);
+
+ bool queryIfComplete();
+ void waitForCompletion();
+
+ //! downloads asynchronously
+ // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its subMat)
+ void enqueueDownload(const GpuMat& src, CudaMem& dst);
+ void enqueueDownload(const GpuMat& src, Mat& dst);
+
+ //! uploads asynchronously
+ // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its ROI)
+ void enqueueUpload(const CudaMem& src, GpuMat& dst);
+ void enqueueUpload(const Mat& src, GpuMat& dst);
+
+ //! copy asynchronously
+ void enqueueCopy(const GpuMat& src, GpuMat& dst);
+
+ //! memory set asynchronously
+ void enqueueMemSet(GpuMat& src, Scalar val);
+ void enqueueMemSet(GpuMat& src, Scalar val, const GpuMat& mask);
+
+ //! converts matrix type, ex from float to uchar depending on type
+ void enqueueConvert(const GpuMat& src, GpuMat& dst, int dtype, double a = 1, double b = 0);
+
+ //! adds a callback to be called on the host after all currently enqueued items in the stream have completed
+ typedef void (*StreamCallback)(Stream& stream, int status, void* userData);
+ void enqueueHostCallback(StreamCallback callback, void* userData);
+
+ static Stream& Null();
+
+ operator bool() const;
+
+private:
+ struct Impl;
+
+ explicit Stream(Impl* impl);
+ void create();
+ void release();
+
+ Impl *impl;
+
+ friend struct StreamAccessor;
+};
+
+
+//////////////////////////////// Filter Engine ////////////////////////////////
+
+/*!
+The Base Class for 1D or Row-wise Filters
+
+This is the base class for linear or non-linear filters that process 1D data.
+In particular, such filters are used for the "horizontal" filtering parts in separable filters.
+*/
+class CV_EXPORTS BaseRowFilter_GPU
+{
+public:
+ BaseRowFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {}
+ virtual ~BaseRowFilter_GPU() {}
+ virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0;
+ int ksize, anchor;
+};
+
+/*!
+The Base Class for Column-wise Filters
+
+This is the base class for linear or non-linear filters that process columns of 2D arrays.
+Such filters are used for the "vertical" filtering parts in separable filters.
+*/
+class CV_EXPORTS BaseColumnFilter_GPU
+{
+public:
+ BaseColumnFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {}
+ virtual ~BaseColumnFilter_GPU() {}
+ virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0;
+ int ksize, anchor;
+};
+
+/*!
+The Base Class for Non-Separable 2D Filters.
+
+This is the base class for linear or non-linear 2D filters.
+*/
+class CV_EXPORTS BaseFilter_GPU
+{
+public:
+ BaseFilter_GPU(const Size& ksize_, const Point& anchor_) : ksize(ksize_), anchor(anchor_) {}
+ virtual ~BaseFilter_GPU() {}
+ virtual void operator()(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null()) = 0;
+ Size ksize;
+ Point anchor;
+};
+
+/*!
+The Base Class for Filter Engine.
+
+The class can be used to apply an arbitrary filtering operation to an image.
+It contains all the necessary intermediate buffers.
+*/
+class CV_EXPORTS FilterEngine_GPU
+{
+public:
+ virtual ~FilterEngine_GPU() {}
+
+ virtual void apply(const GpuMat& src, GpuMat& dst, Rect roi = Rect(0,0,-1,-1), Stream& stream = Stream::Null()) = 0;
+};
+
+//! returns the non-separable filter engine with the specified filter
+CV_EXPORTS Ptr<FilterEngine_GPU> createFilter2D_GPU(const Ptr<BaseFilter_GPU>& filter2D, int srcType, int dstType);
+
+//! returns the separable filter engine with the specified filters
+CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU>& rowFilter,
+ const Ptr<BaseColumnFilter_GPU>& columnFilter, int srcType, int bufType, int dstType);
+CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU>& rowFilter,
+ const Ptr<BaseColumnFilter_GPU>& columnFilter, int srcType, int bufType, int dstType, GpuMat& buf);
+
+//! returns horizontal 1D box filter
+//! supports only CV_8UC1 source type and CV_32FC1 sum type
+CV_EXPORTS Ptr<BaseRowFilter_GPU> getRowSumFilter_GPU(int srcType, int sumType, int ksize, int anchor = -1);
+
+//! returns vertical 1D box filter
+//! supports only CV_8UC1 sum type and CV_32FC1 dst type
+CV_EXPORTS Ptr<BaseColumnFilter_GPU> getColumnSumFilter_GPU(int sumType, int dstType, int ksize, int anchor = -1);
+
+//! returns 2D box filter
+//! supports CV_8UC1 and CV_8UC4 source type, dst type must be the same as source type
+CV_EXPORTS Ptr<BaseFilter_GPU> getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1));
+
+//! returns box filter engine
+CV_EXPORTS Ptr<FilterEngine_GPU> createBoxFilter_GPU(int srcType, int dstType, const Size& ksize,
+ const Point& anchor = Point(-1,-1));
+
+//! returns 2D morphological filter
+//! only MORPH_ERODE and MORPH_DILATE are supported
+//! supports CV_8UC1 and CV_8UC4 types
+//! kernel must have CV_8UC1 type, one row, and cols == ksize.width * ksize.height
+CV_EXPORTS Ptr<BaseFilter_GPU> getMorphologyFilter_GPU(int op, int type, const Mat& kernel, const Size& ksize,
+ Point anchor=Point(-1,-1));
+
+//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
+CV_EXPORTS Ptr<FilterEngine_GPU> createMorphologyFilter_GPU(int op, int type, const Mat& kernel,
+ const Point& anchor = Point(-1,-1), int iterations = 1);
+CV_EXPORTS Ptr<FilterEngine_GPU> createMorphologyFilter_GPU(int op, int type, const Mat& kernel, GpuMat& buf,
+ const Point& anchor = Point(-1,-1), int iterations = 1);
+
+//! returns 2D filter with the specified kernel
+//! supports CV_8U, CV_16U and CV_32F one and four channel image
+CV_EXPORTS Ptr<BaseFilter_GPU> getLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+//! returns the non-separable linear filter engine
+CV_EXPORTS Ptr<FilterEngine_GPU> createLinearFilter_GPU(int srcType, int dstType, const Mat& kernel,
+ Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT);
+
+//! returns the primitive row filter with the specified kernel.
+//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 source type.
+//! there are two versions of the algorithm: NPP and OpenCV.
+//! the NPP version is called when srcType == CV_8UC1 or srcType == CV_8UC4 and bufType == srcType,
+//! otherwise the OpenCV version is called.
+//! NPP supports only BORDER_CONSTANT border type.
+//! OpenCV version supports only CV_32F as buffer depth and
+//! BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types.
+CV_EXPORTS Ptr<BaseRowFilter_GPU> getLinearRowFilter_GPU(int srcType, int bufType, const Mat& rowKernel,
+ int anchor = -1, int borderType = BORDER_DEFAULT);
+
+//! returns the primitive column filter with the specified kernel.
+//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 dst type.
+//! there are two versions of the algorithm: NPP and OpenCV.
+//! the NPP version is called when dstType == CV_8UC1 or dstType == CV_8UC4 and bufType == dstType,
+//! otherwise the OpenCV version is called.
+//! NPP supports only BORDER_CONSTANT border type.
+//! OpenCV version supports only CV_32F as buffer depth and
+//! BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types.
+CV_EXPORTS Ptr<BaseColumnFilter_GPU> getLinearColumnFilter_GPU(int bufType, int dstType, const Mat& columnKernel,
+ int anchor = -1, int borderType = BORDER_DEFAULT);
+
+//! returns the separable linear filter engine
+CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel,
+ const Mat& columnKernel, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT,
+ int columnBorderType = -1);
+CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel,
+ const Mat& columnKernel, GpuMat& buf, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT,
+ int columnBorderType = -1);
+
+//! returns filter engine for the generalized Sobel operator
+CV_EXPORTS Ptr<FilterEngine_GPU> createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+CV_EXPORTS Ptr<FilterEngine_GPU> createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize, GpuMat& buf,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+
+//! returns the Gaussian filter engine
+CV_EXPORTS Ptr<FilterEngine_GPU> createGaussianFilter_GPU(int type, Size ksize, double sigma1, double sigma2 = 0,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+CV_EXPORTS Ptr<FilterEngine_GPU> createGaussianFilter_GPU(int type, Size ksize, GpuMat& buf, double sigma1, double sigma2 = 0,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+
+//! returns maximum filter
+CV_EXPORTS Ptr<BaseFilter_GPU> getMaxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1));
+
+//! returns minimum filter
+CV_EXPORTS Ptr<BaseFilter_GPU> getMinFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1));
+
+//! smooths the image using the normalized box filter
+//! supports CV_8UC1, CV_8UC4 types
+CV_EXPORTS void boxFilter(const GpuMat& src, GpuMat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null());
+
+//! a synonym for normalized box filter
+static inline void blur(const GpuMat& src, GpuMat& dst, Size ksize, Point anchor = Point(-1,-1), Stream& stream = Stream::Null())
+{
+ boxFilter(src, dst, -1, ksize, anchor, stream);
+}
+
+//! erodes the image (applies the local minimum operator)
+CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1);
+CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf,
+ Point anchor = Point(-1, -1), int iterations = 1,
+ Stream& stream = Stream::Null());
+
+//! dilates the image (applies the local maximum operator)
+CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1);
+CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, GpuMat& buf,
+ Point anchor = Point(-1, -1), int iterations = 1,
+ Stream& stream = Stream::Null());
+
+//! applies an advanced morphological operation to the image
+CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1);
+CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, GpuMat& buf1, GpuMat& buf2,
+ Point anchor = Point(-1, -1), int iterations = 1, Stream& stream = Stream::Null());
+
+//! applies non-separable 2D linear filter to the image
+CV_EXPORTS void filter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernel, Point anchor=Point(-1,-1), int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null());
+
+//! applies separable 2D linear filter to the image
+CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY,
+ Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY, GpuMat& buf,
+ Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1,
+ Stream& stream = Stream::Null());
+
+//! applies generalized Sobel operator to the image
+CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, int ksize = 3, double scale = 1,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null());
+
+//! applies the vertical or horizontal Scharr operator to the image
+CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, GpuMat& buf, double scale = 1,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null());
+
+//! smooths the image using Gaussian filter.
+CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2 = 0,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);
+CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, GpuMat& buf, double sigma1, double sigma2 = 0,
+ int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1, Stream& stream = Stream::Null());
+
+//! applies Laplacian operator to the image
+//! supports only ksize = 1 and ksize = 3
+CV_EXPORTS void Laplacian(const GpuMat& src, GpuMat& dst, int ddepth, int ksize = 1, double scale = 1, int borderType = BORDER_DEFAULT, Stream& stream = Stream::Null());
+
+
+////////////////////////////// Arithmetics ///////////////////////////////////
+
+//! implements generalized matrix product algorithm GEMM from BLAS
+CV_EXPORTS void gemm(const GpuMat& src1, const GpuMat& src2, double alpha,
+ const GpuMat& src3, double beta, GpuMat& dst, int flags = 0, Stream& stream = Stream::Null());
+
+//! transposes the matrix
+//! supports matrix with element size = 1, 4 and 8 bytes (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc)
+CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! reverses the order of the rows, columns or both in a matrix
+//! supports 1, 3 and 4 channels images with CV_8U, CV_16U, CV_32S or CV_32F depth
+CV_EXPORTS void flip(const GpuMat& a, GpuMat& b, int flipCode, Stream& stream = Stream::Null());
+
+//! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i))
+//! destination array will have the depth type as lut and the same channels number as source
+//! supports CV_8UC1, CV_8UC3 types
+CV_EXPORTS void LUT(const GpuMat& src, const Mat& lut, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! makes multi-channel array out of several single-channel arrays
+CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! makes multi-channel array out of several single-channel arrays
+CV_EXPORTS void merge(const vector<GpuMat>& src, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! copies each plane of a multi-channel array to a dedicated array
+CV_EXPORTS void split(const GpuMat& src, GpuMat* dst, Stream& stream = Stream::Null());
+
+//! copies each plane of a multi-channel array to a dedicated array
+CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, Stream& stream = Stream::Null());
+
+//! computes magnitude of complex (x(i).re, x(i).im) vector
+//! supports only CV_32FC2 type
+CV_EXPORTS void magnitude(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
+
+//! computes squared magnitude of complex (x(i).re, x(i).im) vector
+//! supports only CV_32FC2 type
+CV_EXPORTS void magnitudeSqr(const GpuMat& xy, GpuMat& magnitude, Stream& stream = Stream::Null());
+
+//! computes magnitude of each (x(i), y(i)) vector
+//! supports only floating-point source
+CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null());
+
+//! computes squared magnitude of each (x(i), y(i)) vector
+//! supports only floating-point source
+CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, Stream& stream = Stream::Null());
+
+//! computes angle (angle(i)) of each (x(i), y(i)) vector
+//! supports only floating-point source
+CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
+
+//! converts Cartesian coordinates to polar
+//! supports only floating-point source
+CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false, Stream& stream = Stream::Null());
+
+//! converts polar coordinates to Cartesian
+//! supports only floating-point source
+CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false, Stream& stream = Stream::Null());
+
+//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values
+CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double alpha = 1, double beta = 0,
+ int norm_type = NORM_L2, int dtype = -1, const GpuMat& mask = GpuMat());
+CV_EXPORTS void normalize(const GpuMat& src, GpuMat& dst, double a, double b,
+ int norm_type, int dtype, const GpuMat& mask, GpuMat& norm_buf, GpuMat& cvt_buf);
+
+
+//////////////////////////// Per-element operations ////////////////////////////////////
+
+//! adds one matrix to another (c = a + b)
+CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
+//! adds scalar to a matrix (c = a + s)
+CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
+
+//! subtracts one matrix from another (c = a - b)
+CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
+//! subtracts scalar from a matrix (c = a - s)
+CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c, const GpuMat& mask = GpuMat(), int dtype = -1, Stream& stream = Stream::Null());
+
+//! computes element-wise weighted product of the two arrays (c = scale * a * b)
+CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
+//! weighted multiplies matrix to a scalar (c = scale * a * s)
+CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
+
+//! computes element-wise weighted quotient of the two arrays (c = a / b)
+CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
+//! computes element-wise weighted quotient of matrix and scalar (c = a / s)
+CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c, double scale = 1, int dtype = -1, Stream& stream = Stream::Null());
+//! computes element-wise weighted reciprocal of an array (dst = scale/src2)
+CV_EXPORTS void divide(double scale, const GpuMat& b, GpuMat& c, int dtype = -1, Stream& stream = Stream::Null());
+
+//! computes the weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma)
+CV_EXPORTS void addWeighted(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst,
+ int dtype = -1, Stream& stream = Stream::Null());
+
+//! adds scaled array to another one (dst = alpha*src1 + src2)
+static inline void scaleAdd(const GpuMat& src1, double alpha, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null())
+{
+ addWeighted(src1, alpha, src2, 1.0, 0.0, dst, -1, stream);
+}
+
+//! computes element-wise absolute difference of two arrays (c = abs(a - b))
+CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c, Stream& stream = Stream::Null());
+//! computes element-wise absolute difference of array and scalar (c = abs(a - s))
+CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c, Stream& stream = Stream::Null());
+
+//! computes absolute value of each matrix element
+//! supports CV_16S and CV_32F depth
+CV_EXPORTS void abs(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! computes square of each pixel in an image
+//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
+CV_EXPORTS void sqr(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! computes square root of each pixel in an image
+//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
+CV_EXPORTS void sqrt(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! computes exponent of each matrix element (b = e**a)
+//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
+CV_EXPORTS void exp(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null());
+
+//! computes natural logarithm of absolute value of each matrix element: b = log(abs(a))
+//! supports CV_8U, CV_16U, CV_16S and CV_32F depth
+CV_EXPORTS void log(const GpuMat& a, GpuMat& b, Stream& stream = Stream::Null());
+
+//! computes power of each matrix element:
+// (dst(i,j) = pow( src(i,j) , power), if src.type() is integer
+// (dst(i,j) = pow(fabs(src(i,j)), power), otherwise
+//! supports all, except depth == CV_64F
+CV_EXPORTS void pow(const GpuMat& src, double power, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! compares elements of two arrays (c = a \<cmpop\> b)
+CV_EXPORTS void compare(const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop, Stream& stream = Stream::Null());
+CV_EXPORTS void compare(const GpuMat& a, Scalar sc, GpuMat& c, int cmpop, Stream& stream = Stream::Null());
+
+//! performs per-elements bit-wise inversion
+CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());
+
+//! calculates per-element bit-wise disjunction of two arrays
+CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());
+//! calculates per-element bit-wise disjunction of array and scalar
+//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth
+CV_EXPORTS void bitwise_or(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! calculates per-element bit-wise conjunction of two arrays
+CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());
+//! calculates per-element bit-wise conjunction of array and scalar
+//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth
+CV_EXPORTS void bitwise_and(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! calculates per-element bit-wise "exclusive or" operation
+CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat(), Stream& stream = Stream::Null());
+//! calculates per-element bit-wise "exclusive or" of array and scalar
+//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth
+CV_EXPORTS void bitwise_xor(const GpuMat& src1, const Scalar& sc, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! pixel by pixel right shift of an image by a constant value
+//! supports 1, 3 and 4 channels images with integers elements
+CV_EXPORTS void rshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! pixel by pixel left shift of an image by a constant value
+//! supports 1, 3 and 4 channels images with CV_8U, CV_16U or CV_32S depth
+CV_EXPORTS void lshift(const GpuMat& src, Scalar_<int> sc, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! computes per-element minimum of two arrays (dst = min(src1, src2))
+CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! computes per-element minimum of array and scalar (dst = min(src1, src2))
+CV_EXPORTS void min(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! computes per-element maximum of two arrays (dst = max(src1, src2))
+CV_EXPORTS void max(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! computes per-element maximum of array and scalar (dst = max(src1, src2))
+CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst, Stream& stream = Stream::Null());
+
+enum { ALPHA_OVER, ALPHA_IN, ALPHA_OUT, ALPHA_ATOP, ALPHA_XOR, ALPHA_PLUS, ALPHA_OVER_PREMUL, ALPHA_IN_PREMUL, ALPHA_OUT_PREMUL,
+ ALPHA_ATOP_PREMUL, ALPHA_XOR_PREMUL, ALPHA_PLUS_PREMUL, ALPHA_PREMUL};
+
+//! Composite two images using alpha opacity values contained in each image
+//! Supports CV_8UC4, CV_16UC4, CV_32SC4 and CV_32FC4 types
+CV_EXPORTS void alphaComp(const GpuMat& img1, const GpuMat& img2, GpuMat& dst, int alpha_op, Stream& stream = Stream::Null());
+
+
+////////////////////////////// Image processing //////////////////////////////
+
+//! DST[x,y] = SRC[xmap[x,y],ymap[x,y]]
+//! supports only CV_32FC1 map type
+CV_EXPORTS void remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap,
+ int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(),
+ Stream& stream = Stream::Null());
+
+//! Does mean shift filtering on GPU.
+CV_EXPORTS void meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
+ Stream& stream = Stream::Null());
+
+//! Does mean shift procedure on GPU.
+CV_EXPORTS void meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1),
+ Stream& stream = Stream::Null());
+
+//! Does mean shift segmentation with elimination of small regions.
+CV_EXPORTS void meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
+
+//! Does coloring of disparity image: [0..ndisp) -> [0..240, 1, 1] in HSV.
+//! Supported types of input disparity: CV_8U, CV_16S.
+//! Output disparity has CV_8UC4 type in BGRA format (alpha = 255).
+CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp, Stream& stream = Stream::Null());
+
+//! Reprojects disparity image to 3D space.
+//! Supports CV_8U and CV_16S types of input disparity.
+//! The output is a 3- or 4-channel floating-point matrix.
+//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map.
+//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify.
+CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, int dst_cn = 4, Stream& stream = Stream::Null());
+
+//! converts image from one color space to another
+CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0, Stream& stream = Stream::Null());
+
+enum
+{
+ // Bayer Demosaicing (Malvar, He, and Cutler)
+ COLOR_BayerBG2BGR_MHT = 256,
+ COLOR_BayerGB2BGR_MHT = 257,
+ COLOR_BayerRG2BGR_MHT = 258,
+ COLOR_BayerGR2BGR_MHT = 259,
+
+ COLOR_BayerBG2RGB_MHT = COLOR_BayerRG2BGR_MHT,
+ COLOR_BayerGB2RGB_MHT = COLOR_BayerGR2BGR_MHT,
+ COLOR_BayerRG2RGB_MHT = COLOR_BayerBG2BGR_MHT,
+ COLOR_BayerGR2RGB_MHT = COLOR_BayerGB2BGR_MHT,
+
+ COLOR_BayerBG2GRAY_MHT = 260,
+ COLOR_BayerGB2GRAY_MHT = 261,
+ COLOR_BayerRG2GRAY_MHT = 262,
+ COLOR_BayerGR2GRAY_MHT = 263
+};
+CV_EXPORTS void demosaicing(const GpuMat& src, GpuMat& dst, int code, int dcn = -1, Stream& stream = Stream::Null());
+
+//! swap channels
+//! dstOrder - Integer array describing how channel values are permuted. The n-th entry
+//! of the array contains the number of the channel that is stored in the n-th channel of
+//! the output image. E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR
+//! channel order.
+CV_EXPORTS void swapChannels(GpuMat& image, const int dstOrder[4], Stream& stream = Stream::Null());
+
+//! Routines for correcting image color gamma
+CV_EXPORTS void gammaCorrection(const GpuMat& src, GpuMat& dst, bool forward = true, Stream& stream = Stream::Null());
+
+//! applies fixed threshold to the image
+CV_EXPORTS double threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type, Stream& stream = Stream::Null());
+
+//! resizes the image
+//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA
+CV_EXPORTS void resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
+
+//! warps the image using affine transformation
+//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+CV_EXPORTS void warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR,
+ int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
+
+CV_EXPORTS void buildWarpAffineMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null());
+
+//! warps the image using perspective transformation
+//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR,
+ int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar(), Stream& stream = Stream::Null());
+
+CV_EXPORTS void buildWarpPerspectiveMaps(const Mat& M, bool inverse, Size dsize, GpuMat& xmap, GpuMat& ymap, Stream& stream = Stream::Null());
+
+//! builds plane warping maps
+CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, const Mat &T, float scale,
+ GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());
+
+//! builds cylindrical warping maps
+CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
+ GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());
+
+//! builds spherical warping maps
+CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
+ GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());
+
+//! rotates an image around the origin (0,0) and then shifts it
+//! supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+//! supports 1, 3 or 4 channels images with CV_8U, CV_16U or CV_32F depth
+CV_EXPORTS void rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift = 0, double yShift = 0,
+ int interpolation = INTER_LINEAR, Stream& stream = Stream::Null());
+
+//! copies 2D array to a larger destination array and pads borders with user-specifiable constant
+CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType,
+ const Scalar& value = Scalar(), Stream& stream = Stream::Null());
+
+//! computes the integral image
+//! sum will have CV_32S type, but will contain unsigned int values
+//! supports only CV_8UC1 source type
+CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum, Stream& stream = Stream::Null());
+//! buffered version
+CV_EXPORTS void integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& stream = Stream::Null());
+
+//! computes squared integral image
+//! result matrix will have 64F type, but will contain 64U values
+//! supports source images of 8UC1 type only
+CV_EXPORTS void sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& stream = Stream::Null());
+
+//! computes vertical sum, supports only CV_32FC1 images
+CV_EXPORTS void columnSum(const GpuMat& src, GpuMat& sum);
+
+//! computes the standard deviation of integral images
+//! supports only CV_32SC1 source type and CV_32FC1 sqr type
+//! output will have CV_32FC1 type
+CV_EXPORTS void rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& stream = Stream::Null());
+
+//! computes Harris cornerness criteria at each image pixel
+CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
+CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType = BORDER_REFLECT101);
+CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k,
+ int borderType = BORDER_REFLECT101, Stream& stream = Stream::Null());
+
+//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria
+CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101);
+CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101);
+CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize,
+ int borderType=BORDER_REFLECT101, Stream& stream = Stream::Null());
+
+//! performs per-element multiplication of two full (not packed) Fourier spectrums
+//! supports 32FC2 matrices only (interleaved format)
+CV_EXPORTS void mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false, Stream& stream = Stream::Null());
+
+//! performs per-element multiplication of two full (not packed) Fourier spectrums
+//! supports 32FC2 matrices only (interleaved format)
+CV_EXPORTS void mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB=false, Stream& stream = Stream::Null());
+
+//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of floating point matrix.
+//! Param dft_size is the size of DFT transform.
+//!
+//! If the source matrix is not continuous, then additional copy will be done,
+//! so to avoid copying ensure the source matrix is continuous. If you want to use
+//! preallocated output ensure it is continuous too, otherwise it will be reallocated.
+//!
+//! Being implemented via CUFFT real-to-complex transform result contains only non-redundant values
+//! in CUFFT's format. Result as full complex matrix for such kind of transform cannot be retrieved.
+//!
+//! For complex-to-real transform it is assumed that the source matrix is packed in CUFFT's format.
+CV_EXPORTS void dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0, Stream& stream = Stream::Null());
+
+struct CV_EXPORTS ConvolveBuf
+{
+ Size result_size;
+ Size block_size;
+ Size user_block_size;
+ Size dft_size;
+ int spect_len;
+
+ GpuMat image_spect, templ_spect, result_spect;
+ GpuMat image_block, templ_block, result_data;
+
+ void create(Size image_size, Size templ_size);
+ static Size estimateBlockSize(Size result_size, Size templ_size);
+};
+
+
+//! computes convolution (or cross-correlation) of two images using discrete Fourier transform
+//! supports source images of 32FC1 type only
+//! result matrix will have 32FC1 type
+CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr = false);
+CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream = Stream::Null());
+
+struct CV_EXPORTS MatchTemplateBuf
+{
+ Size user_block_size;
+ GpuMat imagef, templf;
+ std::vector<GpuMat> images;
+ std::vector<GpuMat> image_sums;
+ std::vector<GpuMat> image_sqsums;
+};
+
+//! computes the proximity map for the raster template and the image where the template is searched for
+CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, Stream &stream = Stream::Null());
+
+//! computes the proximity map for the raster template and the image where the template is searched for
+CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method, MatchTemplateBuf &buf, Stream& stream = Stream::Null());
+
+//! smoothes the source image and downsamples it
+CV_EXPORTS void pyrDown(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! upsamples the source image and then smoothes it
+CV_EXPORTS void pyrUp(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
+
+//! performs linear blending of two images
+//! to avoid accuracy errors the sum of weights shouldn't be very close to zero
+CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
+ GpuMat& result, Stream& stream = Stream::Null());
+
+//! Performs bilateral filtering of passed image
+CV_EXPORTS void bilateralFilter(const GpuMat& src, GpuMat& dst, int kernel_size, float sigma_color, float sigma_spatial,
+ int borderMode = BORDER_DEFAULT, Stream& stream = Stream::Null());
+
+//! Brute force non-local means algorithm (slow but universal)
+CV_EXPORTS void nonLocalMeans(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, int borderMode = BORDER_DEFAULT, Stream& s = Stream::Null());
+
+//! Fast (but approximate) version of non-local means algorithm similar to CPU function (running sums technique)
+class CV_EXPORTS FastNonLocalMeansDenoising
+{
+public:
+ //! Simple method, recommended for grayscale images (though it supports multichannel images)
+ void simpleMethod(const GpuMat& src, GpuMat& dst, float h, int search_window = 21, int block_size = 7, Stream& s = Stream::Null());
+
+ //! Processes luminance and color components separately
+ void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color, int search_window = 21, int block_size = 7, Stream& s = Stream::Null());
+
+private:
+
+ GpuMat buffer, extended_src_buffer;
+ GpuMat lab, l, ab;
+};
+
+struct CV_EXPORTS CannyBuf
+{
+ void create(const Size& image_size, int apperture_size = 3);
+ void release();
+
+ GpuMat dx, dy;
+ GpuMat mag;
+ GpuMat map;
+ GpuMat st1, st2;
+ GpuMat unused;
+ Ptr<FilterEngine_GPU> filterDX, filterDY;
+
+ CannyBuf() {}
+ explicit CannyBuf(const Size& image_size, int apperture_size = 3) {create(image_size, apperture_size);}
+ CannyBuf(const GpuMat& dx_, const GpuMat& dy_);
+};
+
+CV_EXPORTS void Canny(const GpuMat& image, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
+CV_EXPORTS void Canny(const GpuMat& image, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
+CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false);
+CV_EXPORTS void Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& edges, double low_thresh, double high_thresh, bool L2gradient = false);
+
+class CV_EXPORTS ImagePyramid
+{
+public:
+ inline ImagePyramid() : nLayers_(0) {}
+ inline ImagePyramid(const GpuMat& img, int nLayers, Stream& stream = Stream::Null())
+ {
+ build(img, nLayers, stream);
+ }
+
+ void build(const GpuMat& img, int nLayers, Stream& stream = Stream::Null());
+
+ void getLayer(GpuMat& outImg, Size outRoi, Stream& stream = Stream::Null()) const;
+
+ inline void release()
+ {
+ layer0_.release();
+ pyramid_.clear();
+ nLayers_ = 0;
+ }
+
+private:
+ GpuMat layer0_;
+ std::vector<GpuMat> pyramid_;
+ int nLayers_;
+};
+
+//! HoughLines
+
+struct HoughLinesBuf
+{
+ GpuMat accum;
+ GpuMat list;
+};
+
+CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096);
+CV_EXPORTS void HoughLines(const GpuMat& src, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int threshold, bool doSort = false, int maxLines = 4096);
+CV_EXPORTS void HoughLinesDownload(const GpuMat& d_lines, OutputArray h_lines, OutputArray h_votes = noArray());
+
+//! HoughLinesP
+
+//! finds line segments in the black-n-white image using probabilistic Hough transform
+CV_EXPORTS void HoughLinesP(const GpuMat& image, GpuMat& lines, HoughLinesBuf& buf, float rho, float theta, int minLineLength, int maxLineGap, int maxLines = 4096);
+
+//! HoughCircles
+
+struct HoughCirclesBuf
+{
+ GpuMat edges;
+ GpuMat accum;
+ GpuMat list;
+ CannyBuf cannyBuf;
+};
+
+CV_EXPORTS void HoughCircles(const GpuMat& src, GpuMat& circles, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
+CV_EXPORTS void HoughCircles(const GpuMat& src, GpuMat& circles, HoughCirclesBuf& buf, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
+CV_EXPORTS void HoughCirclesDownload(const GpuMat& d_circles, OutputArray h_circles);
+
+//! finds arbitrary template in the grayscale image using Generalized Hough Transform
+//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.
+//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.
+class CV_EXPORTS GeneralizedHough_GPU : public Algorithm
+{
+public:
+ static Ptr<GeneralizedHough_GPU> create(int method);
+
+ virtual ~GeneralizedHough_GPU();
+
+ //! set template to search
+ void setTemplate(const GpuMat& templ, int cannyThreshold = 100, Point templCenter = Point(-1, -1));
+ void setTemplate(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, Point templCenter = Point(-1, -1));
+
+ //! find template on image
+ void detect(const GpuMat& image, GpuMat& positions, int cannyThreshold = 100);
+ void detect(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, GpuMat& positions);
+
+ void download(const GpuMat& d_positions, OutputArray h_positions, OutputArray h_votes = noArray());
+
+ void release();
+
+protected:
+ virtual void setTemplateImpl(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, Point templCenter) = 0;
+ virtual void detectImpl(const GpuMat& edges, const GpuMat& dx, const GpuMat& dy, GpuMat& positions) = 0;
+ virtual void releaseImpl() = 0;
+
+private:
+ GpuMat edges_;
+ CannyBuf cannyBuf_;
+};
+
+////////////////////////////// Matrix reductions //////////////////////////////
+
+//! computes mean value and standard deviation of all or selected array elements
+//! supports only CV_8UC1 type
+CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev);
+//! buffered version
+CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev, GpuMat& buf);
+
+//! computes norm of array
+//! supports NORM_INF, NORM_L1, NORM_L2
+//! supports all matrices except 64F
+CV_EXPORTS double norm(const GpuMat& src1, int normType=NORM_L2);
+CV_EXPORTS double norm(const GpuMat& src1, int normType, GpuMat& buf);
+CV_EXPORTS double norm(const GpuMat& src1, int normType, const GpuMat& mask, GpuMat& buf);
+
+//! computes norm of the difference between two arrays
+//! supports NORM_INF, NORM_L1, NORM_L2
+//! supports only CV_8UC1 type
+CV_EXPORTS double norm(const GpuMat& src1, const GpuMat& src2, int normType=NORM_L2);
+
+//! computes sum of array elements
+//! supports only single channel images
+CV_EXPORTS Scalar sum(const GpuMat& src);
+CV_EXPORTS Scalar sum(const GpuMat& src, GpuMat& buf);
+CV_EXPORTS Scalar sum(const GpuMat& src, const GpuMat& mask, GpuMat& buf);
+
+//! computes sum of array elements absolute values
+//! supports only single channel images
+CV_EXPORTS Scalar absSum(const GpuMat& src);
+CV_EXPORTS Scalar absSum(const GpuMat& src, GpuMat& buf);
+CV_EXPORTS Scalar absSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf);
+
+//! computes squared sum of array elements
+//! supports only single channel images
+CV_EXPORTS Scalar sqrSum(const GpuMat& src);
+CV_EXPORTS Scalar sqrSum(const GpuMat& src, GpuMat& buf);
+CV_EXPORTS Scalar sqrSum(const GpuMat& src, const GpuMat& mask, GpuMat& buf);
+
+//! finds global minimum and maximum array elements and returns their values
+CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal=0, const GpuMat& mask=GpuMat());
+CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask, GpuMat& buf);
+
+//! finds global minimum and maximum array elements and returns their values with locations
+CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0,
+ const GpuMat& mask=GpuMat());
+CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
+ const GpuMat& mask, GpuMat& valbuf, GpuMat& locbuf);
+
+//! counts non-zero array elements
+CV_EXPORTS int countNonZero(const GpuMat& src);
+CV_EXPORTS int countNonZero(const GpuMat& src, GpuMat& buf);
+
+//! reduces a matrix to a vector
+CV_EXPORTS void reduce(const GpuMat& mtx, GpuMat& vec, int dim, int reduceOp, int dtype = -1, Stream& stream = Stream::Null());
+
+
+///////////////////////////// Calibration 3D //////////////////////////////////
+
+CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
+ GpuMat& dst, Stream& stream = Stream::Null());
+
+CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
+ const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
+ Stream& stream = Stream::Null());
+
+CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
+ const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
+ int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
+ std::vector<int>* inliers=NULL);
+
+//////////////////////////////// Image Labeling ////////////////////////////////
+
+//!performs labeling via graph cuts of a 2D regular 4-connected graph.
+CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels,
+ GpuMat& buf, Stream& stream = Stream::Null());
+
+//!performs labeling via graph cuts of a 2D regular 8-connected graph.
+CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& topLeft, GpuMat& topRight,
+ GpuMat& bottom, GpuMat& bottomLeft, GpuMat& bottomRight,
+ GpuMat& labels,
+ GpuMat& buf, Stream& stream = Stream::Null());
+
+//! compute mask for Generalized Flood fill components labeling.
+CV_EXPORTS void connectivityMask(const GpuMat& image, GpuMat& mask, const cv::Scalar& lo, const cv::Scalar& hi, Stream& stream = Stream::Null());
+
+//! performs connected components labeling.
+CV_EXPORTS void labelComponents(const GpuMat& mask, GpuMat& components, int flags = 0, Stream& stream = Stream::Null());
+
+////////////////////////////////// Histograms //////////////////////////////////
+
+//! Compute levels with even distribution. levels will have 1 row and nLevels cols and CV_32SC1 type.
+CV_EXPORTS void evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel);
+//! Calculates histogram with evenly distributed bins for single channel source.
+//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types.
+//! Output hist will have one row and histSize cols and CV_32SC1 type.
+CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
+CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());
+//! Calculates histogram with evenly distributed bins for four-channel source.
+//! All channels of source are processed separately.
+//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types.
+//! Output hist[i] will have one row and histSize[i] cols and CV_32SC1 type.
+CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
+CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());
+//! Calculates histogram with bins determined by levels array.
+//! levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.
+//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types.
+//! Output hist will have one row and (levels.cols-1) cols and CV_32SC1 type.
+CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null());
+CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null());
+//! Calculates histogram with bins determined by levels array.
+//! All levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.
+//! All channels of source are processed separately.
+//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types.
+//! Output hist[i] will have one row and (levels[i].cols-1) cols and CV_32SC1 type.
+CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());
+CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream = Stream::Null());
+
+//! Calculates histogram for 8u one channel image
+//! Output hist will have one row, 256 cols and CV32SC1 type.
+CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null());
+CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());
+
+//! normalizes the grayscale image brightness and contrast by normalizing its histogram
+CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());
+CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream = Stream::Null());
+CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());
+
+class CV_EXPORTS CLAHE : public cv::CLAHE
+{
+public:
+ using cv::CLAHE::apply;
+ virtual void apply(InputArray src, OutputArray dst, Stream& stream) = 0;
+};
+CV_EXPORTS Ptr<cv::gpu::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
+
+//////////////////////////////// StereoBM_GPU ////////////////////////////////
+
+class CV_EXPORTS StereoBM_GPU
+{
+public:
+ enum { BASIC_PRESET = 0, PREFILTER_XSOBEL = 1 };
+
+ enum { DEFAULT_NDISP = 64, DEFAULT_WINSZ = 19 };
+
+ //! the default constructor
+ StereoBM_GPU();
+ //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size. ndisparities must be multiple of 8.
+ StereoBM_GPU(int preset, int ndisparities = DEFAULT_NDISP, int winSize = DEFAULT_WINSZ);
+
+ //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
+ //! Output disparity has CV_8U type.
+ void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null());
+
+ //! Some heuristics that try to estimate
+ // if current GPU will be faster than CPU in this algorithm.
+ // It queries current active device.
+ static bool checkIfGpuCallReasonable();
+
+ int preset;
+ int ndisp;
+ int winSize;
+
+ // If avergeTexThreshold == 0 => post processing is disabled
+ // If avergeTexThreshold != 0 then disparity is set 0 in each point (x,y) where for left image
+ // SumOfHorizontalGradiensInWindow(x, y, winSize) < (winSize * winSize) * avergeTexThreshold
+ // i.e. input left image is low textured.
+ float avergeTexThreshold;
+
+private:
+ GpuMat minSSD, leBuf, riBuf;
+};
+
+////////////////////////// StereoBeliefPropagation ///////////////////////////
+// "Efficient Belief Propagation for Early Vision"
+// P.Felzenszwalb
+
+class CV_EXPORTS StereoBeliefPropagation
+{
+public:
+ enum { DEFAULT_NDISP = 64 };
+ enum { DEFAULT_ITERS = 5 };
+ enum { DEFAULT_LEVELS = 5 };
+
+ static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);
+
+ //! the default constructor
+ explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP,
+ int iters = DEFAULT_ITERS,
+ int levels = DEFAULT_LEVELS,
+ int msg_type = CV_32F);
+
+ //! the full constructor taking the number of disparities, number of BP iterations on each level,
+ //! number of levels, truncation of data cost, data weight,
+ //! truncation of discontinuity cost and discontinuity single jump
+ //! DataTerm = data_weight * min(fabs(I2-I1), max_data_term)
+ //! DiscTerm = min(disc_single_jump * fabs(f1-f2), max_disc_term)
+ //! please see paper for more details
+ StereoBeliefPropagation(int ndisp, int iters, int levels,
+ float max_data_term, float data_weight,
+ float max_disc_term, float disc_single_jump,
+ int msg_type = CV_32F);
+
+ //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair,
+ //! if disparity is empty output type will be CV_16S else output type will be disparity.type().
+ void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null());
+
+
+ //! version for user specified data term
+ void operator()(const GpuMat& data, GpuMat& disparity, Stream& stream = Stream::Null());
+
+ int ndisp;
+
+ int iters;
+ int levels;
+
+ float max_data_term;
+ float data_weight;
+ float max_disc_term;
+ float disc_single_jump;
+
+ int msg_type;
+private:
+ GpuMat u, d, l, r, u2, d2, l2, r2;
+ std::vector<GpuMat> datas;
+ GpuMat out;
+};
+
+/////////////////////////// StereoConstantSpaceBP ///////////////////////////
+// "A Constant-Space Belief Propagation Algorithm for Stereo Matching"
+// Qingxiong Yang, Liang Wang, Narendra Ahuja
+// http://vision.ai.uiuc.edu/~qyang6/
+
+class CV_EXPORTS StereoConstantSpaceBP
+{
+public:
+ enum { DEFAULT_NDISP = 128 };
+ enum { DEFAULT_ITERS = 8 };
+ enum { DEFAULT_LEVELS = 4 };
+ enum { DEFAULT_NR_PLANE = 4 };
+
+ static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);
+
+ //! the default constructor
+ explicit StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP,
+ int iters = DEFAULT_ITERS,
+ int levels = DEFAULT_LEVELS,
+ int nr_plane = DEFAULT_NR_PLANE,
+ int msg_type = CV_32F);
+
+ //! the full constructor taking the number of disparities, number of BP iterations on each level,
+ //! number of levels, number of active disparity on the first level, truncation of data cost, data weight,
+ //! truncation of discontinuity cost, discontinuity single jump and minimum disparity threshold
+ StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane,
+ float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,
+ int min_disp_th = 0,
+ int msg_type = CV_32F);
+
+ //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair,
+ //! if disparity is empty output type will be CV_16S else output type will be disparity.type().
+ void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream = Stream::Null());
+
+ int ndisp;
+
+ int iters;
+ int levels;
+
+ int nr_plane;
+
+ float max_data_term;
+ float data_weight;
+ float max_disc_term;
+ float disc_single_jump;
+
+ int min_disp_th;
+
+ int msg_type;
+
+ bool use_local_init_data_cost;
+private:
+ GpuMat messages_buffers;
+
+ GpuMat temp;
+ GpuMat out;
+};
+
+/////////////////////////// DisparityBilateralFilter ///////////////////////////
+// Disparity map refinement using joint bilateral filtering given a single color image.
+// Qingxiong Yang, Liang Wang, Narendra Ahuja
+// http://vision.ai.uiuc.edu/~qyang6/
+
+class CV_EXPORTS DisparityBilateralFilter
+{
+public:
+ enum { DEFAULT_NDISP = 64 };
+ enum { DEFAULT_RADIUS = 3 };
+ enum { DEFAULT_ITERS = 1 };
+
+ //! the default constructor
+ explicit DisparityBilateralFilter(int ndisp = DEFAULT_NDISP, int radius = DEFAULT_RADIUS, int iters = DEFAULT_ITERS);
+
+ //! the full constructor taking the number of disparities, filter radius,
+ //! number of iterations, truncation of data continuity, truncation of disparity continuity
+ //! and filter range sigma
+ DisparityBilateralFilter(int ndisp, int radius, int iters, float edge_threshold, float max_disc_threshold, float sigma_range);
+
+ //! the disparity map refinement operator. Refine disparity map using joint bilateral filtering given a single color image.
+ //! disparity must have CV_8U or CV_16S type, image must have CV_8UC1 or CV_8UC3 type.
+ void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst, Stream& stream = Stream::Null());
+
+private:
+ int ndisp;
+ int radius;
+ int iters;
+
+ float edge_threshold;
+ float max_disc_threshold;
+ float sigma_range;
+
+ GpuMat table_color;
+ GpuMat table_space;
+};
+
+
+//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
+struct CV_EXPORTS HOGConfidence
+{
+ double scale;
+ vector<Point> locations;
+ vector<double> confidences;
+ vector<double> part_scores[4];
+};
+
+struct CV_EXPORTS HOGDescriptor
+{
+ enum { DEFAULT_WIN_SIGMA = -1 };
+ enum { DEFAULT_NLEVELS = 64 };
+ enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL };
+
+ HOGDescriptor(Size win_size=Size(64, 128), Size block_size=Size(16, 16),
+ Size block_stride=Size(8, 8), Size cell_size=Size(8, 8),
+ int nbins=9, double win_sigma=DEFAULT_WIN_SIGMA,
+ double threshold_L2hys=0.2, bool gamma_correction=true,
+ int nlevels=DEFAULT_NLEVELS);
+
+ size_t getDescriptorSize() const;
+ size_t getBlockHistogramSize() const;
+
+ void setSVMDetector(const vector<float>& detector);
+
+ static vector<float> getDefaultPeopleDetector();
+ static vector<float> getPeopleDetector48x96();
+ static vector<float> getPeopleDetector64x128();
+
+ void detect(const GpuMat& img, vector<Point>& found_locations,
+ double hit_threshold=0, Size win_stride=Size(),
+ Size padding=Size());
+
+ void detectMultiScale(const GpuMat& img, vector<Rect>& found_locations,
+ double hit_threshold=0, Size win_stride=Size(),
+ Size padding=Size(), double scale0=1.05,
+ int group_threshold=2);
+
+ void computeConfidence(const GpuMat& img, vector<Point>& hits, double hit_threshold,
+ Size win_stride, Size padding, vector<Point>& locations, vector<double>& confidences);
+
+ void computeConfidenceMultiScale(const GpuMat& img, vector<Rect>& found_locations,
+ double hit_threshold, Size win_stride, Size padding,
+ vector<HOGConfidence> &conf_out, int group_threshold);
+
+ void getDescriptors(const GpuMat& img, Size win_stride,
+ GpuMat& descriptors,
+ int descr_format=DESCR_FORMAT_COL_BY_COL);
+
+ Size win_size;
+ Size block_size;
+ Size block_stride;
+ Size cell_size;
+ int nbins;
+ double win_sigma;
+ double threshold_L2hys;
+ bool gamma_correction;
+ int nlevels;
+
+protected:
+ void computeBlockHistograms(const GpuMat& img);
+ void computeGradient(const GpuMat& img, GpuMat& grad, GpuMat& qangle);
+
+ double getWinSigma() const;
+ bool checkDetectorSize() const;
+
+ static int numPartsWithin(int size, int part_size, int stride);
+ static Size numPartsWithin(Size size, Size part_size, Size stride);
+
+ // Coefficients of the separating plane
+ float free_coef;
+ GpuMat detector;
+
+ // Results of the last classification step
+ GpuMat labels, labels_buf;
+ Mat labels_host;
+
+ // Results of the last histogram evaluation step
+ GpuMat block_hists, block_hists_buf;
+
+ // Gradients computation results
+ GpuMat grad, qangle, grad_buf, qangle_buf;
+
+ // returns subbuffer with required size, reallocates buffer if necessary.
+ static GpuMat getBuffer(const Size& sz, int type, GpuMat& buf);
+ static GpuMat getBuffer(int rows, int cols, int type, GpuMat& buf);
+
+ std::vector<GpuMat> image_scales;
+};
+
+
+////////////////////////////////// BruteForceMatcher //////////////////////////////////
+
+class CV_EXPORTS BruteForceMatcher_GPU_base
+{
+public:
+ enum DistType {L1Dist = 0, L2Dist, HammingDist};
+
+ explicit BruteForceMatcher_GPU_base(DistType distType = L2Dist);
+
+ // Add descriptors to train descriptor collection
+ void add(const std::vector<GpuMat>& descCollection);
+
+ // Get train descriptors collection
+ const std::vector<GpuMat>& getTrainDescriptors() const;
+
+ // Clear train descriptors collection
+ void clear();
+
+ // Return true if there are not train descriptors in collection
+ bool empty() const;
+
+ // Return true if the matcher supports mask in match methods
+ bool isMaskSupported() const;
+
+ // Find one best match for each query descriptor
+ void matchSingle(const GpuMat& query, const GpuMat& train,
+ GpuMat& trainIdx, GpuMat& distance,
+ const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
+
+ // Download trainIdx and distance and convert it to CPU vector with DMatch
+ static void matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches);
+ // Convert trainIdx and distance to vector with DMatch
+ static void matchConvert(const Mat& trainIdx, const Mat& distance, std::vector<DMatch>& matches);
+
+ // Find one best match for each query descriptor
+ void match(const GpuMat& query, const GpuMat& train, std::vector<DMatch>& matches, const GpuMat& mask = GpuMat());
+
+ // Make gpu collection of trains and masks in suitable format for matchCollection function
+ void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection, const std::vector<GpuMat>& masks = std::vector<GpuMat>());
+
+ // Find one best match from train collection for each query descriptor
+ void matchCollection(const GpuMat& query, const GpuMat& trainCollection,
+ GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
+ const GpuMat& masks = GpuMat(), Stream& stream = Stream::Null());
+
+ // Download trainIdx, imgIdx and distance and convert it to vector with DMatch
+ static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, std::vector<DMatch>& matches);
+ // Convert trainIdx, imgIdx and distance to vector with DMatch
+ static void matchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, std::vector<DMatch>& matches);
+
+ // Find one best match from train collection for each query descriptor.
+ void match(const GpuMat& query, std::vector<DMatch>& matches, const std::vector<GpuMat>& masks = std::vector<GpuMat>());
+
+ // Find k best matches for each query descriptor (in increasing order of distances)
+ void knnMatchSingle(const GpuMat& query, const GpuMat& train,
+ GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k,
+ const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
+
+ // Download trainIdx and distance and convert it to vector with DMatch
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+ // Convert trainIdx and distance to vector with DMatch
+ static void knnMatchConvert(const Mat& trainIdx, const Mat& distance,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+ // Find k best matches for each query descriptor (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ void knnMatch(const GpuMat& query, const GpuMat& train,
+ std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask = GpuMat(),
+ bool compactResult = false);
+
+ // Find k best matches from train collection for each query descriptor (in increasing order of distances)
+ void knnMatch2Collection(const GpuMat& query, const GpuMat& trainCollection,
+ GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
+ const GpuMat& maskCollection = GpuMat(), Stream& stream = Stream::Null());
+
+ // Download trainIdx and distance and convert it to vector with DMatch
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void knnMatch2Download(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+ // Convert trainIdx and distance to vector with DMatch
+ static void knnMatch2Convert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+ // Find k best matches for each query descriptor (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ void knnMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, int k,
+ const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);
+
+ // Find best matches for each query descriptor which have distance less than maxDistance.
+ // nMatches.at<int>(0, queryIdx) will contain matches count for queryIdx.
+ // Be careful: nMatches can be greater than trainIdx.cols - it means that matcher didn't find all matches,
+ // because it didn't have enough memory.
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10),
+ // otherwise user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches.
+ // Matches are not sorted.
+ void radiusMatchSingle(const GpuMat& query, const GpuMat& train,
+ GpuMat& trainIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
+ const GpuMat& mask = GpuMat(), Stream& stream = Stream::Null());
+
+ // Download trainIdx, nMatches and distance and convert it to vector with DMatch.
+ // matches will be sorted in increasing order of distances.
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& distance, const GpuMat& nMatches,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+ // Convert trainIdx, nMatches and distance to vector with DMatch.
+ static void radiusMatchConvert(const Mat& trainIdx, const Mat& distance, const Mat& nMatches,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+ // Find best matches for each query descriptor which have distance less than maxDistance
+ // in increasing order of distances).
+ void radiusMatch(const GpuMat& query, const GpuMat& train,
+ std::vector< std::vector<DMatch> >& matches, float maxDistance,
+ const GpuMat& mask = GpuMat(), bool compactResult = false);
+
+ // Find best matches for each query descriptor which have distance less than maxDistance.
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10),
+ // otherwise user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches.
+ // Matches are not sorted.
+ void radiusMatchCollection(const GpuMat& query, GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance, GpuMat& nMatches, float maxDistance,
+ const std::vector<GpuMat>& masks = std::vector<GpuMat>(), Stream& stream = Stream::Null());
+
+ // Download trainIdx, imgIdx, nMatches and distance and convert it to vector with DMatch.
+ // matches will be sorted in increasing order of distances.
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance, const GpuMat& nMatches,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+ // Convert trainIdx, nMatches and distance to vector with DMatch.
+ static void radiusMatchConvert(const Mat& trainIdx, const Mat& imgIdx, const Mat& distance, const Mat& nMatches,
+ std::vector< std::vector<DMatch> >& matches, bool compactResult = false);
+
+ // Find best matches from train collection for each query descriptor which have distance less than
+ // maxDistance (in increasing order of distances).
+ void radiusMatch(const GpuMat& query, std::vector< std::vector<DMatch> >& matches, float maxDistance,
+ const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);
+
+ DistType distType;
+
+private:
+ std::vector<GpuMat> trainDescCollection;
+};
+
+template <class Distance>
+class CV_EXPORTS BruteForceMatcher_GPU;
+
+template <typename T>
+class CV_EXPORTS BruteForceMatcher_GPU< L1<T> > : public BruteForceMatcher_GPU_base
+{
+public:
+ explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L1Dist) {}
+ explicit BruteForceMatcher_GPU(L1<T> /*d*/) : BruteForceMatcher_GPU_base(L1Dist) {}
+};
+template <typename T>
+class CV_EXPORTS BruteForceMatcher_GPU< L2<T> > : public BruteForceMatcher_GPU_base
+{
+public:
+ explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L2Dist) {}
+ explicit BruteForceMatcher_GPU(L2<T> /*d*/) : BruteForceMatcher_GPU_base(L2Dist) {}
+};
+template <> class CV_EXPORTS BruteForceMatcher_GPU< Hamming > : public BruteForceMatcher_GPU_base
+{
+public:
+ explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(HammingDist) {}
+ explicit BruteForceMatcher_GPU(Hamming /*d*/) : BruteForceMatcher_GPU_base(HammingDist) {}
+};
+
+class CV_EXPORTS BFMatcher_GPU : public BruteForceMatcher_GPU_base
+{
+public:
+ explicit BFMatcher_GPU(int norm = NORM_L2) : BruteForceMatcher_GPU_base(norm == NORM_L1 ? L1Dist : norm == NORM_L2 ? L2Dist : HammingDist) {}
+};
+
+////////////////////////////////// CascadeClassifier_GPU //////////////////////////////////////////
+// The cascade classifier class for object detection: supports old haar and new lbp xml formats and nvbin for haar cascades only.
+class CV_EXPORTS CascadeClassifier_GPU
+{
+public:
+ CascadeClassifier_GPU();
+ CascadeClassifier_GPU(const std::string& filename);
+ ~CascadeClassifier_GPU();
+
+ bool empty() const;
+ bool load(const std::string& filename);
+ void release();
+
+ /* returns number of detected objects */
+ int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor = 1.2, int minNeighbors = 4, Size minSize = Size());
+ int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, Size maxObjectSize, Size minSize = Size(), double scaleFactor = 1.1, int minNeighbors = 4);
+
+ bool findLargestObject;
+ bool visualizeInPlace;
+
+ Size getClassifierSize() const;
+
+private:
+ struct CascadeClassifierImpl;
+ CascadeClassifierImpl* impl;
+ struct HaarCascade;
+ struct LbpCascade;
+ friend class CascadeClassifier_GPU_LBP;
+};
+
+////////////////////////////////// FAST //////////////////////////////////////////
+
+class CV_EXPORTS FAST_GPU
+{
+public:
+ enum
+ {
+ LOCATION_ROW = 0,
+ RESPONSE_ROW,
+ ROWS_COUNT
+ };
+
+ // all features have same size
+ static const int FEATURE_SIZE = 7;
+
+ explicit FAST_GPU(int threshold, bool nonmaxSuppression = true, double keypointsRatio = 0.05);
+
+ //! finds the keypoints using FAST detector
+ //! supports only CV_8UC1 images
+ void operator ()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints);
+ void operator ()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
+
+ //! download keypoints from device to host memory
+ void downloadKeypoints(const GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints);
+
+ //! convert keypoints to KeyPoint vector
+ void convertKeypoints(const Mat& h_keypoints, std::vector<KeyPoint>& keypoints);
+
+ //! release temporary buffer's memory
+ void release();
+
+ bool nonmaxSuppression;
+
+ int threshold;
+
+ //! max keypoints = keypointsRatio * img.size().area()
+ double keypointsRatio;
+
+ //! find keypoints and compute its response if nonmaxSuppression is true
+ //! return count of detected keypoints
+ int calcKeyPointsLocation(const GpuMat& image, const GpuMat& mask);
+
+ //! get final array of keypoints
+ //! performs nonmax suppression if needed
+ //! return final count of keypoints
+ int getKeyPoints(GpuMat& keypoints);
+
+private:
+ GpuMat kpLoc_;
+ int count_;
+
+ GpuMat score_;
+
+ GpuMat d_keypoints_;
+};
+
+////////////////////////////////// ORB //////////////////////////////////////////
+
+class CV_EXPORTS ORB_GPU
+{
+public:
+ enum
+ {
+ X_ROW = 0,
+ Y_ROW,
+ RESPONSE_ROW,
+ ANGLE_ROW,
+ OCTAVE_ROW,
+ SIZE_ROW,
+ ROWS_COUNT
+ };
+
+ enum
+ {
+ DEFAULT_FAST_THRESHOLD = 20
+ };
+
+ //! Constructor
+ explicit ORB_GPU(int nFeatures = 500, float scaleFactor = 1.2f, int nLevels = 8, int edgeThreshold = 31,
+ int firstLevel = 0, int WTA_K = 2, int scoreType = 0, int patchSize = 31);
+
+ //! Compute the ORB features on an image
+ //! image - the image to compute the features (supports only CV_8UC1 images)
+ //! mask - the mask to apply
+ //! keypoints - the resulting keypoints
+ void operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
+ void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints);
+
+ //! Compute the ORB features and descriptors on an image
+ //! image - the image to compute the features (supports only CV_8UC1 images)
+ //! mask - the mask to apply
+ //! keypoints - the resulting keypoints
+ //! descriptors - descriptors array
+ void operator()(const GpuMat& image, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors);
+ void operator()(const GpuMat& image, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors);
+
+ //! download keypoints from device to host memory
+ void downloadKeyPoints(GpuMat& d_keypoints, std::vector<KeyPoint>& keypoints);
+
+ //! convert keypoints to KeyPoint vector
+ void convertKeyPoints(Mat& d_keypoints, std::vector<KeyPoint>& keypoints);
+
+ //! returns the descriptor size in bytes
+ inline int descriptorSize() const { return kBytes; }
+
+ inline void setFastParams(int threshold, bool nonmaxSuppression = true)
+ {
+ fastDetector_.threshold = threshold;
+ fastDetector_.nonmaxSuppression = nonmaxSuppression;
+ }
+
+ //! release temporary buffer's memory
+ void release();
+
+ //! if true, image will be blurred before descriptors calculation
+ bool blurForDescriptor;
+
+private:
+ enum { kBytes = 32 };
+
+ void buildScalePyramids(const GpuMat& image, const GpuMat& mask);
+
+ void computeKeyPointsPyramid();
+
+ void computeDescriptors(GpuMat& descriptors);
+
+ void mergeKeyPoints(GpuMat& keypoints);
+
+ int nFeatures_;
+ float scaleFactor_;
+ int nLevels_;
+ int edgeThreshold_;
+ int firstLevel_;
+ int WTA_K_;
+ int scoreType_;
+ int patchSize_;
+
+ // The number of desired features per scale
+ std::vector<size_t> n_features_per_level_;
+
+ // Points to compute BRIEF descriptors from
+ GpuMat pattern_;
+
+ std::vector<GpuMat> imagePyr_;
+ std::vector<GpuMat> maskPyr_;
+
+ GpuMat buf_;
+
+ std::vector<GpuMat> keyPointsPyr_;
+ std::vector<int> keyPointsCount_;
+
+ FAST_GPU fastDetector_;
+
+ Ptr<FilterEngine_GPU> blurFilter;
+
+ GpuMat d_keypoints_;
+};
+
+////////////////////////////////// Optical Flow //////////////////////////////////////////
+
+class CV_EXPORTS BroxOpticalFlow
+{
+public:
+ BroxOpticalFlow(float alpha_, float gamma_, float scale_factor_, int inner_iterations_, int outer_iterations_, int solver_iterations_) :
+ alpha(alpha_), gamma(gamma_), scale_factor(scale_factor_),
+ inner_iterations(inner_iterations_), outer_iterations(outer_iterations_), solver_iterations(solver_iterations_)
+ {
+ }
+
+ //! Compute optical flow
+ //! frame0 - source frame (supports only CV_32FC1 type)
+ //! frame1 - frame to track (with the same size and type as frame0)
+ //! u - flow horizontal component (along x axis)
+ //! v - flow vertical component (along y axis)
+ void operator ()(const GpuMat& frame0, const GpuMat& frame1, GpuMat& u, GpuMat& v, Stream& stream = Stream::Null());
+
+ //! flow smoothness
+ float alpha;
+
+ //! gradient constancy importance
+ float gamma;
+
+ //! pyramid scale factor
+ float scale_factor;
+
+ //! number of lagged non-linearity iterations (inner loop)
+ int inner_iterations;
+
+ //! number of warping iterations (number of pyramid levels)
+ int outer_iterations;
+
+ //! number of linear system solver iterations
+ int solver_iterations;
+
+ GpuMat buf;
+};
+
+class CV_EXPORTS GoodFeaturesToTrackDetector_GPU
+{
+public:
+ explicit GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
+ int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04);
+
+ //! return 1 rows matrix with CV_32FC2 type
+ void operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask = GpuMat());
+
+ int maxCorners;
+ double qualityLevel;
+ double minDistance;
+
+ int blockSize;
+ bool useHarrisDetector;
+ double harrisK;
+
+ void releaseMemory()
+ {
+ Dx_.release();
+ Dy_.release();
+ buf_.release();
+ eig_.release();
+ minMaxbuf_.release();
+ tmpCorners_.release();
+ }
+
+private:
+ GpuMat Dx_;
+ GpuMat Dy_;
+ GpuMat buf_;
+ GpuMat eig_;
+ GpuMat minMaxbuf_;
+ GpuMat tmpCorners_;
+};
+
+inline GoodFeaturesToTrackDetector_GPU::GoodFeaturesToTrackDetector_GPU(int maxCorners_, double qualityLevel_, double minDistance_,
+ int blockSize_, bool useHarrisDetector_, double harrisK_)
+{
+ maxCorners = maxCorners_;
+ qualityLevel = qualityLevel_;
+ minDistance = minDistance_;
+ blockSize = blockSize_;
+ useHarrisDetector = useHarrisDetector_;
+ harrisK = harrisK_;
+}
+
+
+class CV_EXPORTS PyrLKOpticalFlow
+{
+public:
+ PyrLKOpticalFlow();
+
+ void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,
+ GpuMat& status, GpuMat* err = 0);
+
+ void dense(const GpuMat& prevImg, const GpuMat& nextImg, GpuMat& u, GpuMat& v, GpuMat* err = 0);
+
+ void releaseMemory();
+
+ Size winSize;
+ int maxLevel;
+ int iters;
+ double derivLambda; //unused
+ bool useInitialFlow;
+ float minEigThreshold; //unused
+ bool getMinEigenVals; //unused
+
+private:
+ GpuMat uPyr_[2];
+ vector<GpuMat> prevPyr_;
+ vector<GpuMat> nextPyr_;
+ GpuMat vPyr_[2];
+ vector<GpuMat> buf_;
+ vector<GpuMat> unused;
+ bool isDeviceArch11_;
+};
+
+
+class CV_EXPORTS FarnebackOpticalFlow
+{
+public:
+ FarnebackOpticalFlow()
+ {
+ numLevels = 5;
+ pyrScale = 0.5;
+ fastPyramids = false;
+ winSize = 13;
+ numIters = 10;
+ polyN = 5;
+ polySigma = 1.1;
+ flags = 0;
+ isDeviceArch11_ = !DeviceInfo().supports(FEATURE_SET_COMPUTE_12);
+ }
+
+ int numLevels;
+ double pyrScale;
+ bool fastPyramids;
+ int winSize;
+ int numIters;
+ int polyN;
+ double polySigma;
+ int flags;
+
+ void operator ()(const GpuMat &frame0, const GpuMat &frame1, GpuMat &flowx, GpuMat &flowy, Stream &s = Stream::Null());
+
+ void releaseMemory()
+ {
+ frames_[0].release();
+ frames_[1].release();
+ pyrLevel_[0].release();
+ pyrLevel_[1].release();
+ M_.release();
+ bufM_.release();
+ R_[0].release();
+ R_[1].release();
+ blurredFrame_[0].release();
+ blurredFrame_[1].release();
+ pyramid0_.clear();
+ pyramid1_.clear();
+ }
+
+private:
+ void prepareGaussian(
+ int n, double sigma, float *g, float *xg, float *xxg,
+ double &ig11, double &ig03, double &ig33, double &ig55);
+
+ void setPolynomialExpansionConsts(int n, double sigma);
+
+ void updateFlow_boxFilter(
+ const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
+ GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
+
+ void updateFlow_gaussianBlur(
+ const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
+ GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[]);
+
+ GpuMat frames_[2];
+ GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];
+ std::vector<GpuMat> pyramid0_, pyramid1_;
+
+ bool isDeviceArch11_;
+};
+
+
+// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
+//
+// see reference:
+// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
+// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
+class CV_EXPORTS OpticalFlowDual_TVL1_GPU
+{
+public:
+ OpticalFlowDual_TVL1_GPU();
+
+ void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy);
+
+ void collectGarbage();
+
+ /**
+ * Time step of the numerical scheme.
+ */
+ double tau;
+
+ /**
+ * Weight parameter for the data term, attachment parameter.
+ * This is the most relevant parameter, which determines the smoothness of the output.
+ * The smaller this parameter is, the smoother the solutions we obtain.
+ * It depends on the range of motions of the images, so its value should be adapted to each image sequence.
+ */
+ double lambda;
+
+ /**
+ * Weight parameter for (u - v)^2, tightness parameter.
+ * It serves as a link between the attachment and the regularization terms.
+ * In theory, it should have a small value in order to maintain both parts in correspondence.
+ * The method is stable for a large range of values of this parameter.
+ */
+ double theta;
+
+ /**
+ * Number of scales used to create the pyramid of images.
+ */
+ int nscales;
+
+ /**
+ * Number of warpings per scale.
+ * Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale.
+ * This is a parameter that assures the stability of the method.
+ * It also affects the running time, so it is a compromise between speed and accuracy.
+ */
+ int warps;
+
+ /**
+ * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
+ * A small value will yield more accurate solutions at the expense of a slower convergence.
+ */
+ double epsilon;
+
+ /**
+ * Stopping criterion iterations number used in the numerical scheme.
+ */
+ int iterations;
+
+ bool useInitialFlow;
+
+private:
+ void procOneScale(const GpuMat& I0, const GpuMat& I1, GpuMat& u1, GpuMat& u2);
+
+ std::vector<GpuMat> I0s;
+ std::vector<GpuMat> I1s;
+ std::vector<GpuMat> u1s;
+ std::vector<GpuMat> u2s;
+
+ GpuMat I1x_buf;
+ GpuMat I1y_buf;
+
+ GpuMat I1w_buf;
+ GpuMat I1wx_buf;
+ GpuMat I1wy_buf;
+
+ GpuMat grad_buf;
+ GpuMat rho_c_buf;
+
+ GpuMat p11_buf;
+ GpuMat p12_buf;
+ GpuMat p21_buf;
+ GpuMat p22_buf;
+
+ GpuMat diff_buf;
+ GpuMat norm_buf;
+};
+
+
+//! Calculates optical flow for 2 images using block matching algorithm
+CV_EXPORTS void calcOpticalFlowBM(const GpuMat& prev, const GpuMat& curr,
+ Size block_size, Size shift_size, Size max_range, bool use_previous,
+ GpuMat& velx, GpuMat& vely, GpuMat& buf,
+ Stream& stream = Stream::Null());
+
+class CV_EXPORTS FastOpticalFlowBM
+{
+public:
+ void operator ()(const GpuMat& I0, const GpuMat& I1, GpuMat& flowx, GpuMat& flowy, int search_window = 21, int block_window = 7, Stream& s = Stream::Null());
+
+private:
+ GpuMat buffer;
+ GpuMat extended_I0;
+ GpuMat extended_I1;
+};
+
+
+//! Interpolate frames (images) using provided optical flow (displacement field).
+//! frame0 - frame 0 (32-bit floating point images, single channel)
+//! frame1 - frame 1 (the same type and size)
+//! fu - forward horizontal displacement
+//! fv - forward vertical displacement
+//! bu - backward horizontal displacement
+//! bv - backward vertical displacement
+//! pos - new frame position
+//! newFrame - new frame
+//! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 GpuMat;
+//! occlusion masks 0, occlusion masks 1,
+//! interpolated forward flow 0, interpolated forward flow 1,
+//! interpolated backward flow 0, interpolated backward flow 1
+//!
+CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
+ const GpuMat& fu, const GpuMat& fv,
+ const GpuMat& bu, const GpuMat& bv,
+ float pos, GpuMat& newFrame, GpuMat& buf,
+ Stream& stream = Stream::Null());
+
+CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
+
+
+//////////////////////// Background/foreground segmentation ////////////////////////
+
+// Foreground Object Detection from Videos Containing Complex Background.
+// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
+// ACM MM2003 9p
+class CV_EXPORTS FGDStatModel
+{
+public:
+ struct CV_EXPORTS Params
+ {
+ int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
+ int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
+ int N2c; // Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c.
+ // Used to allow the first N1c vectors to adapt over time to changing background.
+
+ int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
+ int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
+ int N2cc; // Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
+ // Used to allow the first N1cc vectors to adapt over time to changing background.
+
+ bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
+ int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
+ // These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
+
+ float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
+ float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
+ float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
+
+ float delta; // Affects color and color co-occurrence quantization, typically set to 2.
+ float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
+ float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
+
+ // default Params
+ Params();
+ };
+
+ // out_cn - channels count in output result (can be 3 or 4)
+ // 4-channels require more memory, but a bit faster
+ explicit FGDStatModel(int out_cn = 3);
+ explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
+
+ ~FGDStatModel();
+
+ void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
+ void release();
+
+ int update(const cv::gpu::GpuMat& curFrame);
+
+ //8UC3 or 8UC4 reference background image
+ cv::gpu::GpuMat background;
+
+ //8UC1 foreground image
+ cv::gpu::GpuMat foreground;
+
+ std::vector< std::vector<cv::Point> > foreground_regions;
+
+private:
+ FGDStatModel(const FGDStatModel&);
+ FGDStatModel& operator=(const FGDStatModel&);
+
+ class Impl;
+ std::auto_ptr<Impl> impl_;
+};
+
+/*!
+ Gaussian Mixture-based Background/Foreground Segmentation Algorithm
+
+ The class implements the following algorithm:
+ "An improved adaptive background mixture model for real-time tracking with shadow detection"
+ P. KadewTraKuPong and R. Bowden,
+ Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+*/
+class CV_EXPORTS MOG_GPU
+{
+public:
+ //! the default constructor
+ MOG_GPU(int nmixtures = -1);
+
+ //! re-initialization method
+ void initialize(Size frameSize, int frameType);
+
+ //! the update operator
+ void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = 0.0f, Stream& stream = Stream::Null());
+
+ //! computes a background image which are the mean of all background gaussians
+ void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
+
+ //! releases all inner buffers
+ void release();
+
+ int history;
+ float varThreshold;
+ float backgroundRatio;
+ float noiseSigma;
+
+private:
+ int nmixtures_;
+
+ Size frameSize_;
+ int frameType_;
+ int nframes_;
+
+ GpuMat weight_;
+ GpuMat sortKey_;
+ GpuMat mean_;
+ GpuMat var_;
+};
+
+/*!
+ The class implements the following algorithm:
+ "Improved adaptive Gausian mixture model for background subtraction"
+ Z.Zivkovic
+ International Conference Pattern Recognition, UK, August, 2004.
+ http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
+*/
+class CV_EXPORTS MOG2_GPU
+{
+public:
+ //! the default constructor
+ MOG2_GPU(int nmixtures = -1);
+
+ //! re-initialization method
+ void initialize(Size frameSize, int frameType);
+
+ //! the update operator
+ void operator()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
+
+ //! computes a background image which are the mean of all background gaussians
+ void getBackgroundImage(GpuMat& backgroundImage, Stream& stream = Stream::Null()) const;
+
+ //! releases all inner buffers
+ void release();
+
+ // parameters
+ // you should call initialize after parameters changes
+
+ int history;
+
+ //! here it is the maximum allowed number of mixture components.
+ //! Actual number is determined dynamically per pixel
+ float varThreshold;
+ // threshold on the squared Mahalanobis distance to decide if it is well described
+ // by the background model or not. Related to Cthr from the paper.
+ // This does not influence the update of the background. A typical value could be 4 sigma
+ // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+
+ /////////////////////////
+ // less important parameters - things you might change but be careful
+ ////////////////////////
+
+ float backgroundRatio;
+ // corresponds to fTB=1-cf from the paper
+ // TB - threshold when the component becomes significant enough to be included into
+ // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.9.
+ // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+ // it is considered foreground
+ // float noiseSigma;
+ float varThresholdGen;
+
+ //corresponds to Tg - threshold on the squared Mahalan. dist. to decide
+ //when a sample is close to the existing components. If it is not close
+ //to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
+ //Smaller Tg leads to more generated components and higher Tg might make
+ //lead to small number of components but they can grow too large
+ float fVarInit;
+ float fVarMin;
+ float fVarMax;
+
+ //initial variance for the newly generated components.
+ //It will influence the speed of adaptation. A good guess should be made.
+ //A simple way is to estimate the typical standard deviation from the images.
+ //I used here 10 as a reasonable value
+ // min and max can be used to further control the variance
+ float fCT; //CT - complexity reduction prior
+ //this is related to the number of samples needed to accept that a component
+ //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+ //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+ //shadow detection parameters
+ bool bShadowDetection; //default 1 - do shadow detection
+ unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value
+ float fTau;
+ // Tau - shadow threshold. The shadow is detected if the pixel is darker
+ //version of the background. Tau is a threshold on how much darker the shadow can be.
+ //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
+ //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
+
+private:
+ int nmixtures_;
+
+ Size frameSize_;
+ int frameType_;
+ int nframes_;
+
+ GpuMat weight_;
+ GpuMat variance_;
+ GpuMat mean_;
+
+ GpuMat bgmodelUsedModes_; //keep track of number of modes per pixel
+};
+
+/**
+ * Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
+ * images of the same size, where 255 indicates Foreground and 0 represents Background.
+ * This class implements an algorithm described in "Visual Tracking of Human Visitors under
+ * Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
+ * A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
+ */
+class CV_EXPORTS GMG_GPU
+{
+public:
+ GMG_GPU();
+
+ /**
+ * Validate parameters and set up data structures for appropriate frame size.
+ * @param frameSize Input frame size
+ * @param min Minimum value taken on by pixels in image sequence. Usually 0
+ * @param max Maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
+ */
+ void initialize(Size frameSize, float min = 0.0f, float max = 255.0f);
+
+ /**
+ * Performs single-frame background subtraction and builds up a statistical background image
+ * model.
+ * @param frame Input frame
+ * @param fgmask Output mask image representing foreground and background pixels
+ * @param learningRate determines how quickly features are "forgotten" from histograms
+ * @param stream Stream for the asynchronous version
+ */
+ void operator ()(const GpuMat& frame, GpuMat& fgmask, float learningRate = -1.0f, Stream& stream = Stream::Null());
+
+ //! Releases all inner buffers
+ void release();
+
+ //! Total number of distinct colors to maintain in histogram.
+ int maxFeatures;
+
+ //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
+ float learningRate;
+
+ //! Number of frames of video to use to initialize histograms.
+ int numInitializationFrames;
+
+ //! Number of discrete levels in each channel to be used in histograms.
+ int quantizationLevels;
+
+ //! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
+ float backgroundPrior;
+
+ //! Value above which pixel is determined to be FG.
+ float decisionThreshold;
+
+ //! Smoothing radius, in pixels, for cleaning up FG image.
+ int smoothingRadius;
+
+ //! Perform background model update.
+ bool updateBackgroundModel;
+
+private:
+ float maxVal_, minVal_;
+
+ Size frameSize_;
+
+ int frameNum_;
+
+ GpuMat nfeatures_;
+ GpuMat colors_;
+ GpuMat weights_;
+
+ Ptr<FilterEngine_GPU> boxFilter_;
+ GpuMat buf_;
+};
+
+////////////////////////////////// Video Encoding //////////////////////////////////
+
+// Works only under Windows
+// Supports only H264 video codec and AVI files
+class CV_EXPORTS VideoWriter_GPU
+{
+public:
+ struct EncoderParams;
+
+ // Callbacks for video encoder, use it if you want to work with raw video stream
+ class EncoderCallBack;
+
+ enum SurfaceFormat
+ {
+ SF_UYVY = 0,
+ SF_YUY2,
+ SF_YV12,
+ SF_NV12,
+ SF_IYUV,
+ SF_BGR,
+ SF_GRAY = SF_BGR
+ };
+
+ VideoWriter_GPU();
+ VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
+ VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
+ VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
+ VideoWriter_GPU(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
+ ~VideoWriter_GPU();
+
+ // all methods throws cv::Exception if error occurs
+ void open(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
+ void open(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
+ void open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR);
+ void open(const cv::Ptr<EncoderCallBack>& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR);
+
+ bool isOpened() const;
+ void close();
+
+ void write(const cv::gpu::GpuMat& image, bool lastFrame = false);
+
+ struct CV_EXPORTS EncoderParams
+ {
+ int P_Interval; // NVVE_P_INTERVAL,
+ int IDR_Period; // NVVE_IDR_PERIOD,
+ int DynamicGOP; // NVVE_DYNAMIC_GOP,
+ int RCType; // NVVE_RC_TYPE,
+ int AvgBitrate; // NVVE_AVG_BITRATE,
+ int PeakBitrate; // NVVE_PEAK_BITRATE,
+ int QP_Level_Intra; // NVVE_QP_LEVEL_INTRA,
+ int QP_Level_InterP; // NVVE_QP_LEVEL_INTER_P,
+ int QP_Level_InterB; // NVVE_QP_LEVEL_INTER_B,
+ int DeblockMode; // NVVE_DEBLOCK_MODE,
+ int ProfileLevel; // NVVE_PROFILE_LEVEL,
+ int ForceIntra; // NVVE_FORCE_INTRA,
+ int ForceIDR; // NVVE_FORCE_IDR,
+ int ClearStat; // NVVE_CLEAR_STAT,
+ int DIMode; // NVVE_SET_DEINTERLACE,
+ int Presets; // NVVE_PRESETS,
+ int DisableCabac; // NVVE_DISABLE_CABAC,
+ int NaluFramingType; // NVVE_CONFIGURE_NALU_FRAMING_TYPE
+ int DisableSPSPPS; // NVVE_DISABLE_SPS_PPS
+
+ EncoderParams();
+ explicit EncoderParams(const std::string& configFile);
+
+ void load(const std::string& configFile);
+ void save(const std::string& configFile) const;
+ };
+
+ EncoderParams getParams() const;
+
+ class CV_EXPORTS EncoderCallBack
+ {
+ public:
+ enum PicType
+ {
+ IFRAME = 1,
+ PFRAME = 2,
+ BFRAME = 3
+ };
+
+ virtual ~EncoderCallBack() {}
+
+ // callback function to signal the start of bitstream that is to be encoded
+ // must return pointer to buffer
+ virtual uchar* acquireBitStream(int* bufferSize) = 0;
+
+ // callback function to signal that the encoded bitstream is ready to be written to file
+ virtual void releaseBitStream(unsigned char* data, int size) = 0;
+
+ // callback function to signal that the encoding operation on the frame has started
+ virtual void onBeginFrame(int frameNumber, PicType picType) = 0;
+
+ // callback function signals that the encoding operation on the frame has finished
+ virtual void onEndFrame(int frameNumber, PicType picType) = 0;
+ };
+
+private:
+ VideoWriter_GPU(const VideoWriter_GPU&);
+ VideoWriter_GPU& operator=(const VideoWriter_GPU&);
+
+ class Impl;
+ std::auto_ptr<Impl> impl_;
+};
+
+
+////////////////////////////////// Video Decoding //////////////////////////////////////////
+
+namespace detail
+{
+ class FrameQueue;
+ class VideoParser;
+}
+
+class CV_EXPORTS VideoReader_GPU
+{
+public:
+ enum Codec
+ {
+ MPEG1 = 0,
+ MPEG2,
+ MPEG4,
+ VC1,
+ H264,
+ JPEG,
+ H264_SVC,
+ H264_MVC,
+
+ Uncompressed_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), // Y,U,V (4:2:0)
+ Uncompressed_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0)
+ Uncompressed_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0)
+ Uncompressed_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2)
+ Uncompressed_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) // UYVY (4:2:2)
+ };
+
+ enum ChromaFormat
+ {
+ Monochrome=0,
+ YUV420,
+ YUV422,
+ YUV444
+ };
+
+ struct FormatInfo
+ {
+ Codec codec;
+ ChromaFormat chromaFormat;
+ int width;
+ int height;
+ };
+
+ class VideoSource;
+
+ VideoReader_GPU();
+ explicit VideoReader_GPU(const std::string& filename);
+ explicit VideoReader_GPU(const cv::Ptr<VideoSource>& source);
+
+ ~VideoReader_GPU();
+
+ void open(const std::string& filename);
+ void open(const cv::Ptr<VideoSource>& source);
+ bool isOpened() const;
+
+ void close();
+
+ bool read(GpuMat& image);
+
+ FormatInfo format() const;
+ void dumpFormat(std::ostream& st);
+
+ class CV_EXPORTS VideoSource
+ {
+ public:
+ VideoSource() : frameQueue_(0), videoParser_(0) {}
+ virtual ~VideoSource() {}
+
+ virtual FormatInfo format() const = 0;
+ virtual void start() = 0;
+ virtual void stop() = 0;
+ virtual bool isStarted() const = 0;
+ virtual bool hasError() const = 0;
+
+ void setFrameQueue(detail::FrameQueue* frameQueue) { frameQueue_ = frameQueue; }
+ void setVideoParser(detail::VideoParser* videoParser) { videoParser_ = videoParser; }
+
+ protected:
+ bool parseVideoData(const uchar* data, size_t size, bool endOfStream = false);
+
+ private:
+ VideoSource(const VideoSource&);
+ VideoSource& operator =(const VideoSource&);
+
+ detail::FrameQueue* frameQueue_;
+ detail::VideoParser* videoParser_;
+ };
+
+private:
+ VideoReader_GPU(const VideoReader_GPU&);
+ VideoReader_GPU& operator =(const VideoReader_GPU&);
+
+ class Impl;
+ std::auto_ptr<Impl> impl_;
+};
+
+//! removes points (CV_32FC2, single row matrix) with zero mask value
+CV_EXPORTS void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask);
+
+CV_EXPORTS void calcWobbleSuppressionMaps(
+ int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
+ GpuMat &mapx, GpuMat &mapy);
+
+} // namespace gpu
+
+} // namespace cv
+
+#endif /* __OPENCV_GPU_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp
new file mode 100644
index 00000000..840398b5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/gpumat.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/core/gpumat.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp
new file mode 100644
index 00000000..bcd58ba3
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/gpu/stream_accessor.hpp
@@ -0,0 +1,65 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_STREAM_ACCESSOR_HPP__
+#define __OPENCV_GPU_STREAM_ACCESSOR_HPP__
+
+#include "opencv2/gpu/gpu.hpp"
+#include "cuda_runtime_api.h"
+
+namespace cv
+{
+ namespace gpu
+ {
+ // This is the only header file that depends on CUDA. All other headers are independent.
+ // So if you use OpenCV binaries you do not need to install the CUDA Toolkit.
+ // But if you want to use the GPU yourself, you may get a CUDA stream instance using the class below.
+ // In this case you have to install the CUDA Toolkit.
+ struct StreamAccessor
+ {
+ CV_EXPORTS static cudaStream_t getStream(const Stream& stream);
+ CV_EXPORTS static Stream wrapStream(cudaStream_t stream);
+ };
+ }
+}
+
+#endif /* __OPENCV_GPU_STREAM_ACCESSOR_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp
new file mode 100644
index 00000000..c76a020a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/highgui/highgui.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h
new file mode 100644
index 00000000..4c931d40
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/cap_ios.h
@@ -0,0 +1,171 @@
+/* For iOS video I/O
+ * by Eduard Feicho on 29/07/12
+ * Copyright 2012. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#import <UIKit/UIKit.h>
+#import <Accelerate/Accelerate.h>
+#import <AVFoundation/AVFoundation.h>
+#import <ImageIO/ImageIO.h>
+#include "opencv2/core/core.hpp"
+
+/////////////////////////////////////// CvAbstractCamera /////////////////////////////////////
+
+@class CvAbstractCamera;
+
+@interface CvAbstractCamera : NSObject
+{
+ AVCaptureSession* captureSession;
+ AVCaptureConnection* videoCaptureConnection;
+ AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;
+
+ UIDeviceOrientation currentDeviceOrientation;
+
+ BOOL cameraAvailable;
+ BOOL captureSessionLoaded;
+ BOOL running;
+ BOOL useAVCaptureVideoPreviewLayer;
+
+ AVCaptureDevicePosition defaultAVCaptureDevicePosition;
+ AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
+ NSString *const defaultAVCaptureSessionPreset;
+
+ int defaultFPS;
+
+ UIView* parentView;
+
+ int imageWidth;
+ int imageHeight;
+}
+
+@property (nonatomic, retain) AVCaptureSession* captureSession;
+@property (nonatomic, retain) AVCaptureConnection* videoCaptureConnection;
+
+@property (nonatomic, readonly) BOOL running;
+@property (nonatomic, readonly) BOOL captureSessionLoaded;
+
+@property (nonatomic, assign) int defaultFPS;
+@property (nonatomic, readonly) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;
+@property (nonatomic, assign) AVCaptureDevicePosition defaultAVCaptureDevicePosition;
+@property (nonatomic, assign) AVCaptureVideoOrientation defaultAVCaptureVideoOrientation;
+@property (nonatomic, assign) BOOL useAVCaptureVideoPreviewLayer;
+@property (nonatomic, strong) NSString *const defaultAVCaptureSessionPreset;
+
+@property (nonatomic, assign) int imageWidth;
+@property (nonatomic, assign) int imageHeight;
+
+@property (nonatomic, retain) UIView* parentView;
+
+- (void)start;
+- (void)stop;
+- (void)switchCameras;
+
+- (id)initWithParentView:(UIView*)parent;
+
+- (void)createCaptureOutput;
+- (void)createVideoPreviewLayer;
+- (void)updateOrientation;
+
+- (void)lockFocus;
+- (void)unlockFocus;
+- (void)lockExposure;
+- (void)unlockExposure;
+- (void)lockBalance;
+- (void)unlockBalance;
+
+@end
+
+///////////////////////////////// CvVideoCamera ///////////////////////////////////////////
+
+@class CvVideoCamera;
+
+@protocol CvVideoCameraDelegate <NSObject>
+
+#ifdef __cplusplus
+// delegate method for processing image frames
+- (void)processImage:(cv::Mat&)image;
+#endif
+
+@end
+
+@interface CvVideoCamera : CvAbstractCamera<AVCaptureVideoDataOutputSampleBufferDelegate>
+{
+ AVCaptureVideoDataOutput *videoDataOutput;
+
+ dispatch_queue_t videoDataOutputQueue;
+ CALayer *customPreviewLayer;
+
+ BOOL grayscaleMode;
+
+ BOOL recordVideo;
+ BOOL rotateVideo;
+ AVAssetWriterInput* recordAssetWriterInput;
+ AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
+ AVAssetWriter* recordAssetWriter;
+
+ CMTime lastSampleTime;
+
+}
+
+@property (nonatomic, assign) id<CvVideoCameraDelegate> delegate;
+@property (nonatomic, assign) BOOL grayscaleMode;
+
+@property (nonatomic, assign) BOOL recordVideo;
+@property (nonatomic, assign) BOOL rotateVideo;
+@property (nonatomic, retain) AVAssetWriterInput* recordAssetWriterInput;
+@property (nonatomic, retain) AVAssetWriterInputPixelBufferAdaptor* recordPixelBufferAdaptor;
+@property (nonatomic, retain) AVAssetWriter* recordAssetWriter;
+
+- (void)adjustLayoutToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation;
+- (void)layoutPreviewLayer;
+- (void)saveVideo;
+- (NSURL *)videoFileURL;
+- (NSString *)videoFileString;
+
+
+@end
+
+///////////////////////////////// CvPhotoCamera ///////////////////////////////////////////
+
+@class CvPhotoCamera;
+
+@protocol CvPhotoCameraDelegate <NSObject>
+
+- (void)photoCamera:(CvPhotoCamera*)photoCamera capturedImage:(UIImage *)image;
+- (void)photoCameraCancel:(CvPhotoCamera*)photoCamera;
+
+@end
+
+@interface CvPhotoCamera : CvAbstractCamera
+{
+ AVCaptureStillImageOutput *stillImageOutput;
+}
+
+@property (nonatomic, assign) id<CvPhotoCameraDelegate> delegate;
+
+- (void)takePicture;
+
+@end
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp
new file mode 100644
index 00000000..f6f22930
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui.hpp
@@ -0,0 +1,255 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_HIGHGUI_HPP__
+#define __OPENCV_HIGHGUI_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui_c.h"
+
+#ifdef __cplusplus
+
+struct CvCapture;
+struct CvVideoWriter;
+
+namespace cv
+{
+
+enum {
+ // Flags for namedWindow
+ WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
+ WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrained by the image displayed
+ WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support
+
+ // Flags for set / getWindowProperty
+ WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property
+ WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property
+ WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ratio
+ WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support
+};
+
+CV_EXPORTS_W void namedWindow(const string& winname, int flags = WINDOW_AUTOSIZE);
+CV_EXPORTS_W void destroyWindow(const string& winname);
+CV_EXPORTS_W void destroyAllWindows();
+
+CV_EXPORTS_W int startWindowThread();
+
+CV_EXPORTS_W int waitKey(int delay = 0);
+
+CV_EXPORTS_W void imshow(const string& winname, InputArray mat);
+
+CV_EXPORTS_W void resizeWindow(const string& winname, int width, int height);
+CV_EXPORTS_W void moveWindow(const string& winname, int x, int y);
+
+CV_EXPORTS_W void setWindowProperty(const string& winname, int prop_id, double prop_value);//YV
+CV_EXPORTS_W double getWindowProperty(const string& winname, int prop_id);//YV
+
+enum
+{
+ EVENT_MOUSEMOVE =0,
+ EVENT_LBUTTONDOWN =1,
+ EVENT_RBUTTONDOWN =2,
+ EVENT_MBUTTONDOWN =3,
+ EVENT_LBUTTONUP =4,
+ EVENT_RBUTTONUP =5,
+ EVENT_MBUTTONUP =6,
+ EVENT_LBUTTONDBLCLK =7,
+ EVENT_RBUTTONDBLCLK =8,
+ EVENT_MBUTTONDBLCLK =9
+};
+
+enum
+{
+ EVENT_FLAG_LBUTTON =1,
+ EVENT_FLAG_RBUTTON =2,
+ EVENT_FLAG_MBUTTON =4,
+ EVENT_FLAG_CTRLKEY =8,
+ EVENT_FLAG_SHIFTKEY =16,
+ EVENT_FLAG_ALTKEY =32
+};
+
+typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata);
+
+//! assigns callback for mouse events
+CV_EXPORTS void setMouseCallback(const string& winname, MouseCallback onMouse, void* userdata = 0);
+
+
+typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata);
+
+CV_EXPORTS int createTrackbar(const string& trackbarname, const string& winname,
+ int* value, int count,
+ TrackbarCallback onChange = 0,
+ void* userdata = 0);
+
+CV_EXPORTS_W int getTrackbarPos(const string& trackbarname, const string& winname);
+CV_EXPORTS_W void setTrackbarPos(const string& trackbarname, const string& winname, int pos);
+
+// OpenGL support
+
+typedef void (*OpenGlDrawCallback)(void* userdata);
+CV_EXPORTS void setOpenGlDrawCallback(const string& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0);
+
+CV_EXPORTS void setOpenGlContext(const string& winname);
+
+CV_EXPORTS void updateWindow(const string& winname);
+
+// < Deprecated
+CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, const GlArrays& arr);
+CV_EXPORTS void pointCloudShow(const string& winname, const GlCamera& camera, InputArray points, InputArray colors = noArray());
+// >
+
+//Only for Qt
+
+CV_EXPORTS CvFont fontQt(const string& nameFont, int pointSize=-1,
+ Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL,
+ int style=CV_STYLE_NORMAL, int spacing=0);
+CV_EXPORTS void addText( const Mat& img, const string& text, Point org, CvFont font);
+
+CV_EXPORTS void displayOverlay(const string& winname, const string& text, int delayms CV_DEFAULT(0));
+CV_EXPORTS void displayStatusBar(const string& winname, const string& text, int delayms CV_DEFAULT(0));
+
+CV_EXPORTS void saveWindowParameters(const string& windowName);
+CV_EXPORTS void loadWindowParameters(const string& windowName);
+CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
+CV_EXPORTS void stopLoop();
+
+typedef void (CV_CDECL *ButtonCallback)(int state, void* userdata);
+CV_EXPORTS int createButton( const string& bar_name, ButtonCallback on_change,
+ void* userdata=NULL, int type=CV_PUSH_BUTTON,
+ bool initial_button_state=0);
+
+//-------------------------
+
+enum
+{
+ // 8bit, color or not
+ IMREAD_UNCHANGED =-1,
+ // 8bit, gray
+ IMREAD_GRAYSCALE =0,
+ // ?, color
+ IMREAD_COLOR =1,
+ // any depth, ?
+ IMREAD_ANYDEPTH =2,
+ // ?, any color
+ IMREAD_ANYCOLOR =4
+};
+
+enum
+{
+ IMWRITE_JPEG_QUALITY =1,
+ IMWRITE_PNG_COMPRESSION =16,
+ IMWRITE_PNG_STRATEGY =17,
+ IMWRITE_PNG_BILEVEL =18,
+ IMWRITE_PNG_STRATEGY_DEFAULT =0,
+ IMWRITE_PNG_STRATEGY_FILTERED =1,
+ IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,
+ IMWRITE_PNG_STRATEGY_RLE =3,
+ IMWRITE_PNG_STRATEGY_FIXED =4,
+ IMWRITE_PXM_BINARY =32
+};
+
+CV_EXPORTS_W Mat imread( const string& filename, int flags=1 );
+CV_EXPORTS_W bool imwrite( const string& filename, InputArray img,
+ const vector<int>& params=vector<int>());
+CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );
+CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst );
+CV_EXPORTS_W bool imencode( const string& ext, InputArray img,
+ CV_OUT vector<uchar>& buf,
+ const vector<int>& params=vector<int>());
+
+#ifndef CV_NO_VIDEO_CAPTURE_CPP_API
+
+template<> void CV_EXPORTS Ptr<CvCapture>::delete_obj();
+template<> void CV_EXPORTS Ptr<CvVideoWriter>::delete_obj();
+
+class CV_EXPORTS_W VideoCapture
+{
+public:
+ CV_WRAP VideoCapture();
+ CV_WRAP VideoCapture(const string& filename);
+ CV_WRAP VideoCapture(int device);
+
+ virtual ~VideoCapture();
+ CV_WRAP virtual bool open(const string& filename);
+ CV_WRAP virtual bool open(int device);
+ CV_WRAP virtual bool isOpened() const;
+ CV_WRAP virtual void release();
+
+ CV_WRAP virtual bool grab();
+ CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0);
+ virtual VideoCapture& operator >> (CV_OUT Mat& image);
+ CV_WRAP virtual bool read(CV_OUT Mat& image);
+
+ CV_WRAP virtual bool set(int propId, double value);
+ CV_WRAP virtual double get(int propId);
+
+protected:
+ Ptr<CvCapture> cap;
+};
+
+
+class CV_EXPORTS_W VideoWriter
+{
+public:
+ CV_WRAP VideoWriter();
+ CV_WRAP VideoWriter(const string& filename, int fourcc, double fps,
+ Size frameSize, bool isColor=true);
+
+ virtual ~VideoWriter();
+ CV_WRAP virtual bool open(const string& filename, int fourcc, double fps,
+ Size frameSize, bool isColor=true);
+ CV_WRAP virtual bool isOpened() const;
+ CV_WRAP virtual void release();
+ virtual VideoWriter& operator << (const Mat& image);
+ CV_WRAP virtual void write(const Mat& image);
+
+protected:
+ Ptr<CvVideoWriter> writer;
+};
+
+#endif
+
+}
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h
new file mode 100644
index 00000000..85a59bb0
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/highgui_c.h
@@ -0,0 +1,660 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_HIGHGUI_H__
+#define __OPENCV_HIGHGUI_H__
+
+#include "opencv2/core/core_c.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/****************************************************************************************\
+* Basic GUI functions *
+\****************************************************************************************/
+//YV
+//-----------New for Qt
+/* For font */
+enum { CV_FONT_LIGHT = 25,//QFont::Light,
+ CV_FONT_NORMAL = 50,//QFont::Normal,
+ CV_FONT_DEMIBOLD = 63,//QFont::DemiBold,
+ CV_FONT_BOLD = 75,//QFont::Bold,
+ CV_FONT_BLACK = 87 //QFont::Black
+};
+
+enum { CV_STYLE_NORMAL = 0,//QFont::StyleNormal,
+ CV_STYLE_ITALIC = 1,//QFont::StyleItalic,
+ CV_STYLE_OBLIQUE = 2 //QFont::StyleOblique
+};
+/* ---------*/
+
+//for color cvScalar(blue_component, green_component, red_component[, alpha_component])
+//and alpha= 0 <-> 0xFF (not transparent <-> transparent)
+CVAPI(CvFont) cvFontQt(const char* nameFont, int pointSize CV_DEFAULT(-1), CvScalar color CV_DEFAULT(cvScalarAll(0)), int weight CV_DEFAULT(CV_FONT_NORMAL), int style CV_DEFAULT(CV_STYLE_NORMAL), int spacing CV_DEFAULT(0));
+
+CVAPI(void) cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont *arg2);
+
+CVAPI(void) cvDisplayOverlay(const char* name, const char* text, int delayms CV_DEFAULT(0));
+CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms CV_DEFAULT(0));
+
+CVAPI(void) cvSaveWindowParameters(const char* name);
+CVAPI(void) cvLoadWindowParameters(const char* name);
+CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
+CVAPI(void) cvStopLoop( void );
+
+typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata);
+enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2};
+CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCallback on_change CV_DEFAULT(NULL), void* userdata CV_DEFAULT(NULL) , int button_type CV_DEFAULT(CV_PUSH_BUTTON), int initial_button_state CV_DEFAULT(0));
+//----------------------
+
+
+/* this function is used to set some external parameters in case of X Window */
+CVAPI(int) cvInitSystem( int argc, char** argv );
+
+CVAPI(int) cvStartWindowThread( void );
+
+// --------- YV ---------
+enum
+{
+ //These 3 flags are used by cvSet/GetWindowProperty
+ CV_WND_PROP_FULLSCREEN = 0, //to change/get window's fullscreen property
+ CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property
+ CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property
+ CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support
+
+ //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
+ CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
+ CV_WINDOW_AUTOSIZE = 0x00000001, //the user cannot resize the window, the size is constrained by the image displayed
+ CV_WINDOW_OPENGL = 0x00001000, //window with opengl support
+
+ //Those flags are only for Qt
+ CV_GUI_EXPANDED = 0x00000000, //status bar and tool bar
+ CV_GUI_NORMAL = 0x00000010, //old fashioned way
+
+ //These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty
+ CV_WINDOW_FULLSCREEN = 1,//change the window to fullscreen
+ CV_WINDOW_FREERATIO = 0x00000100,//the image expends as much as it can (no ratio constraint)
+ CV_WINDOW_KEEPRATIO = 0x00000000//the aspect ratio of the image is respected.
+};
+
+/* create window */
+CVAPI(int) cvNamedWindow( const char* name, int flags CV_DEFAULT(CV_WINDOW_AUTOSIZE) );
+
+/* Set and Get Property of the window */
+CVAPI(void) cvSetWindowProperty(const char* name, int prop_id, double prop_value);
+CVAPI(double) cvGetWindowProperty(const char* name, int prop_id);
+
+/* display image within window (highgui windows remember their content) */
+CVAPI(void) cvShowImage( const char* name, const CvArr* image );
+
+/* resize/move window */
+CVAPI(void) cvResizeWindow( const char* name, int width, int height );
+CVAPI(void) cvMoveWindow( const char* name, int x, int y );
+
+
+/* destroy window and all the trackers associated with it */
+CVAPI(void) cvDestroyWindow( const char* name );
+
+CVAPI(void) cvDestroyAllWindows(void);
+
+/* get native window handle (HWND in case of Win32 and Widget in case of X Window) */
+CVAPI(void*) cvGetWindowHandle( const char* name );
+
+/* get name of highgui window given its native handle */
+CVAPI(const char*) cvGetWindowName( void* window_handle );
+
+
+typedef void (CV_CDECL *CvTrackbarCallback)(int pos);
+
+/* create trackbar and display it on top of given window, set callback */
+CVAPI(int) cvCreateTrackbar( const char* trackbar_name, const char* window_name,
+ int* value, int count, CvTrackbarCallback on_change CV_DEFAULT(NULL));
+
+typedef void (CV_CDECL *CvTrackbarCallback2)(int pos, void* userdata);
+
+CVAPI(int) cvCreateTrackbar2( const char* trackbar_name, const char* window_name,
+ int* value, int count, CvTrackbarCallback2 on_change,
+ void* userdata CV_DEFAULT(0));
+
+/* retrieve or set trackbar position */
+CVAPI(int) cvGetTrackbarPos( const char* trackbar_name, const char* window_name );
+CVAPI(void) cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos );
+CVAPI(void) cvSetTrackbarMax(const char* trackbar_name, const char* window_name, int maxval);
+
+enum
+{
+ CV_EVENT_MOUSEMOVE =0,
+ CV_EVENT_LBUTTONDOWN =1,
+ CV_EVENT_RBUTTONDOWN =2,
+ CV_EVENT_MBUTTONDOWN =3,
+ CV_EVENT_LBUTTONUP =4,
+ CV_EVENT_RBUTTONUP =5,
+ CV_EVENT_MBUTTONUP =6,
+ CV_EVENT_LBUTTONDBLCLK =7,
+ CV_EVENT_RBUTTONDBLCLK =8,
+ CV_EVENT_MBUTTONDBLCLK =9
+};
+
+enum
+{
+ CV_EVENT_FLAG_LBUTTON =1,
+ CV_EVENT_FLAG_RBUTTON =2,
+ CV_EVENT_FLAG_MBUTTON =4,
+ CV_EVENT_FLAG_CTRLKEY =8,
+ CV_EVENT_FLAG_SHIFTKEY =16,
+ CV_EVENT_FLAG_ALTKEY =32
+};
+
+typedef void (CV_CDECL *CvMouseCallback )(int event, int x, int y, int flags, void* param);
+
+/* assign callback for mouse events */
+CVAPI(void) cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse,
+ void* param CV_DEFAULT(NULL));
+
+enum
+{
+/* 8bit, color or not */
+ CV_LOAD_IMAGE_UNCHANGED =-1,
+/* 8bit, gray */
+ CV_LOAD_IMAGE_GRAYSCALE =0,
+/* ?, color */
+ CV_LOAD_IMAGE_COLOR =1,
+/* any depth, ? */
+ CV_LOAD_IMAGE_ANYDEPTH =2,
+/* ?, any color */
+ CV_LOAD_IMAGE_ANYCOLOR =4
+};
+
+/* load image from file
+ iscolor can be a combination of above flags where CV_LOAD_IMAGE_UNCHANGED
+ overrides the other flags
+ using CV_LOAD_IMAGE_ANYCOLOR alone is equivalent to CV_LOAD_IMAGE_UNCHANGED
+ unless CV_LOAD_IMAGE_ANYDEPTH is specified images are converted to 8bit
+*/
+CVAPI(IplImage*) cvLoadImage( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
+CVAPI(CvMat*) cvLoadImageM( const char* filename, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
+
+enum
+{
+ CV_IMWRITE_JPEG_QUALITY =1,
+ CV_IMWRITE_PNG_COMPRESSION =16,
+ CV_IMWRITE_PNG_STRATEGY =17,
+ CV_IMWRITE_PNG_BILEVEL =18,
+ CV_IMWRITE_PNG_STRATEGY_DEFAULT =0,
+ CV_IMWRITE_PNG_STRATEGY_FILTERED =1,
+ CV_IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,
+ CV_IMWRITE_PNG_STRATEGY_RLE =3,
+ CV_IMWRITE_PNG_STRATEGY_FIXED =4,
+ CV_IMWRITE_PXM_BINARY =32
+};
+
+/* save image to file */
+CVAPI(int) cvSaveImage( const char* filename, const CvArr* image,
+ const int* params CV_DEFAULT(0) );
+
+/* decode image stored in the buffer */
+CVAPI(IplImage*) cvDecodeImage( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
+CVAPI(CvMat*) cvDecodeImageM( const CvMat* buf, int iscolor CV_DEFAULT(CV_LOAD_IMAGE_COLOR));
+
+/* encode image and store the result as a byte vector (single-row 8uC1 matrix) */
+CVAPI(CvMat*) cvEncodeImage( const char* ext, const CvArr* image,
+ const int* params CV_DEFAULT(0) );
+
+enum
+{
+ CV_CVTIMG_FLIP =1,
+ CV_CVTIMG_SWAP_RB =2
+};
+
+/* utility function: convert one image to another with optional vertical flip */
+CVAPI(void) cvConvertImage( const CvArr* src, CvArr* dst, int flags CV_DEFAULT(0));
+
+/* wait for key event infinitely (delay<=0) or for "delay" milliseconds */
+CVAPI(int) cvWaitKey(int delay CV_DEFAULT(0));
+
+// OpenGL support
+
+typedef void (CV_CDECL *CvOpenGlDrawCallback)(void* userdata);
+CVAPI(void) cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallback callback, void* userdata CV_DEFAULT(NULL));
+
+CVAPI(void) cvSetOpenGlContext(const char* window_name);
+CVAPI(void) cvUpdateWindow(const char* window_name);
+
+
+/****************************************************************************************\
+* Working with Video Files and Cameras *
+\****************************************************************************************/
+
+/* "black box" capture structure */
+typedef struct CvCapture CvCapture;
+
+/* start capturing frames from video file */
+CVAPI(CvCapture*) cvCreateFileCapture( const char* filename );
+
+enum
+{
+ CV_CAP_ANY =0, // autodetect
+
+ CV_CAP_MIL =100, // MIL proprietary drivers
+
+ CV_CAP_VFW =200, // platform native
+ CV_CAP_V4L =200,
+ CV_CAP_V4L2 =200,
+
+ CV_CAP_FIREWARE =300, // IEEE 1394 drivers
+ CV_CAP_FIREWIRE =300,
+ CV_CAP_IEEE1394 =300,
+ CV_CAP_DC1394 =300,
+ CV_CAP_CMU1394 =300,
+
+ CV_CAP_STEREO =400, // TYZX proprietary drivers
+ CV_CAP_TYZX =400,
+ CV_TYZX_LEFT =400,
+ CV_TYZX_RIGHT =401,
+ CV_TYZX_COLOR =402,
+ CV_TYZX_Z =403,
+
+ CV_CAP_QT =500, // QuickTime
+
+ CV_CAP_UNICAP =600, // Unicap drivers
+
+ CV_CAP_DSHOW =700, // DirectShow (via videoInput)
+ CV_CAP_MSMF =1400, // Microsoft Media Foundation (via videoInput)
+
+ CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
+
+ CV_CAP_OPENNI =900, // OpenNI (for Kinect)
+ CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
+
+ CV_CAP_ANDROID =1000, // Android
+ CV_CAP_ANDROID_BACK =CV_CAP_ANDROID+99, // Android back camera
+ CV_CAP_ANDROID_FRONT =CV_CAP_ANDROID+98, // Android front camera
+
+ CV_CAP_XIAPI =1100, // XIMEA Camera API
+
+ CV_CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
+
+ CV_CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
+
+ CV_CAP_INTELPERC = 1500 // Intel Perceptual Computing SDK
+};
+
+/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
+CVAPI(CvCapture*) cvCreateCameraCapture( int index );
+
+/* grab a frame, return 1 on success, 0 on fail.
+ this function is thought to be fast */
+CVAPI(int) cvGrabFrame( CvCapture* capture );
+
+/* get the frame grabbed with cvGrabFrame(..)
+ This function may apply some frame processing like
+ frame decompression, flipping etc.
+ !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
+CVAPI(IplImage*) cvRetrieveFrame( CvCapture* capture, int streamIdx CV_DEFAULT(0) );
+
+/* Just a combination of cvGrabFrame and cvRetrieveFrame
+ !!!DO NOT RELEASE or MODIFY the retrieved frame!!! */
+CVAPI(IplImage*) cvQueryFrame( CvCapture* capture );
+
+/* stop capturing/reading and free resources */
+CVAPI(void) cvReleaseCapture( CvCapture** capture );
+
+enum
+{
+ // modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode)
+ // every feature can have only one mode turned on at a time
+ CV_CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
+ CV_CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
+ CV_CAP_PROP_DC1394_MODE_AUTO = -2,
+ CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
+ CV_CAP_PROP_POS_MSEC =0,
+ CV_CAP_PROP_POS_FRAMES =1,
+ CV_CAP_PROP_POS_AVI_RATIO =2,
+ CV_CAP_PROP_FRAME_WIDTH =3,
+ CV_CAP_PROP_FRAME_HEIGHT =4,
+ CV_CAP_PROP_FPS =5,
+ CV_CAP_PROP_FOURCC =6,
+ CV_CAP_PROP_FRAME_COUNT =7,
+ CV_CAP_PROP_FORMAT =8,
+ CV_CAP_PROP_MODE =9,
+ CV_CAP_PROP_BRIGHTNESS =10,
+ CV_CAP_PROP_CONTRAST =11,
+ CV_CAP_PROP_SATURATION =12,
+ CV_CAP_PROP_HUE =13,
+ CV_CAP_PROP_GAIN =14,
+ CV_CAP_PROP_EXPOSURE =15,
+ CV_CAP_PROP_CONVERT_RGB =16,
+ CV_CAP_PROP_WHITE_BALANCE_U =17,
+ CV_CAP_PROP_RECTIFICATION =18,
+ CV_CAP_PROP_MONOCROME =19,
+ CV_CAP_PROP_SHARPNESS =20,
+ CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
+ // user can adjust refernce level
+ // using this feature
+ CV_CAP_PROP_GAMMA =22,
+ CV_CAP_PROP_TEMPERATURE =23,
+ CV_CAP_PROP_TRIGGER =24,
+ CV_CAP_PROP_TRIGGER_DELAY =25,
+ CV_CAP_PROP_WHITE_BALANCE_V =26,
+ CV_CAP_PROP_ZOOM =27,
+ CV_CAP_PROP_FOCUS =28,
+ CV_CAP_PROP_GUID =29,
+ CV_CAP_PROP_ISO_SPEED =30,
+ CV_CAP_PROP_MAX_DC1394 =31,
+ CV_CAP_PROP_BACKLIGHT =32,
+ CV_CAP_PROP_PAN =33,
+ CV_CAP_PROP_TILT =34,
+ CV_CAP_PROP_ROLL =35,
+ CV_CAP_PROP_IRIS =36,
+ CV_CAP_PROP_SETTINGS =37,
+ CV_CAP_PROP_BUFFERSIZE =38,
+
+ CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only
+ CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING=1025, // readonly, tricky property, returns cpnst char* indeed
+ CV_CAP_PROP_PREVIEW_FORMAT=1026, // readonly, tricky property, returns cpnst char* indeed
+
+ // OpenNI map generators
+ CV_CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
+ CV_CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
+ CV_CAP_OPENNI_GENERATORS_MASK = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_OPENNI_IMAGE_GENERATOR,
+
+ // Properties of cameras available through OpenNI interfaces
+ CV_CAP_PROP_OPENNI_OUTPUT_MODE = 100,
+ CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
+ CV_CAP_PROP_OPENNI_BASELINE = 102, // in mm
+ CV_CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
+ CV_CAP_PROP_OPENNI_REGISTRATION = 104, // flag
+ CV_CAP_PROP_OPENNI_REGISTRATION_ON = CV_CAP_PROP_OPENNI_REGISTRATION, // flag that synchronizes the remapping depth map to image map
+ // by changing depth generator's view point (if the flag is "on") or
+ // sets this view point to its normal one (if the flag is "off").
+ CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
+ CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
+ CV_CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
+ CV_CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
+
+ CV_CAP_PROP_OPENNI_GENERATOR_PRESENT = 109,
+
+ CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_GENERATOR_PRESENT,
+ CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
+ CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_BASELINE,
+ CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
+ CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
+ CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
+
+ // Properties of cameras available through GStreamer interface
+ CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
+
+ // PVAPI
+ CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // ip for anable multicast master mode. 0 for disable multicast
+ CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE = 301, // FrameStartTriggerMode: Determines how a frame is initiated
+ CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL = 302, // Horizontal sub-sampling of the image
+ CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL = 303, // Vertical sub-sampling of the image
+ CV_CAP_PROP_PVAPI_BINNINGX = 304, // Horizontal binning factor
+ CV_CAP_PROP_PVAPI_BINNINGY = 305, // Vertical binning factor
+ CV_CAP_PROP_PVAPI_PIXELFORMAT = 306, // Pixel format
+
+ // Properties of cameras available through XIMEA SDK interface
+ CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
+ CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
+ CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
+ CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
+ CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
+ CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
+ CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
+ CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
+ CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
+ CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
+ CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
+ CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
+ CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
+ CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
+ CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
+ CV_CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
+ CV_CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
+ CV_CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
+ CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
+ CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
+ CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
+
+ // Properties for Android cameras
+ CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
+ CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
+ CV_CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
+ CV_CAP_PROP_ANDROID_ANTIBANDING = 8004,
+ CV_CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
+ CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
+ CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
+ CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
+ CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
+ CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,
+
+ // Properties of cameras available through AVFOUNDATION interface
+ CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
+ CV_CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
+ CV_CAP_PROP_IOS_DEVICE_FLASH = 9003,
+ CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
+ CV_CAP_PROP_IOS_DEVICE_TORCH = 9005,
+
+ // Properties of cameras available through Smartek Giganetix Ethernet Vision interface
+ /* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
+ CV_CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
+ CV_CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
+ CV_CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
+ CV_CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
+ CV_CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
+ CV_CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006,
+
+ CV_CAP_PROP_INTELPERC_PROFILE_COUNT = 11001,
+ CV_CAP_PROP_INTELPERC_PROFILE_IDX = 11002,
+ CV_CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE = 11003,
+ CV_CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE = 11004,
+ CV_CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD = 11005,
+ CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ = 11006,
+ CV_CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT = 11007,
+
+ // Intel PerC streams
+ CV_CAP_INTELPERC_DEPTH_GENERATOR = 1 << 29,
+ CV_CAP_INTELPERC_IMAGE_GENERATOR = 1 << 28,
+ CV_CAP_INTELPERC_GENERATORS_MASK = CV_CAP_INTELPERC_DEPTH_GENERATOR + CV_CAP_INTELPERC_IMAGE_GENERATOR
+};
+
+enum
+{
+ // Data given from depth generator.
+ CV_CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
+ CV_CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
+ CV_CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
+ CV_CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
+ CV_CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
+
+ // Data given from RGB image generator.
+ CV_CAP_OPENNI_BGR_IMAGE = 5,
+ CV_CAP_OPENNI_GRAY_IMAGE = 6
+};
+
+// Supported output modes of OpenNI image generator
+enum
+{
+ CV_CAP_OPENNI_VGA_30HZ = 0,
+ CV_CAP_OPENNI_SXGA_15HZ = 1,
+ CV_CAP_OPENNI_SXGA_30HZ = 2,
+ CV_CAP_OPENNI_QVGA_30HZ = 3,
+ CV_CAP_OPENNI_QVGA_60HZ = 4
+};
+
+//supported by Android camera output formats
+enum
+{
+ CV_CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
+ CV_CAP_ANDROID_COLOR_FRAME = CV_CAP_ANDROID_COLOR_FRAME_BGR,
+ CV_CAP_ANDROID_GREY_FRAME = 1, //Y
+ CV_CAP_ANDROID_COLOR_FRAME_RGB = 2,
+ CV_CAP_ANDROID_COLOR_FRAME_BGRA = 3,
+ CV_CAP_ANDROID_COLOR_FRAME_RGBA = 4
+};
+
+// supported Android camera flash modes
+enum
+{
+ CV_CAP_ANDROID_FLASH_MODE_AUTO = 0,
+ CV_CAP_ANDROID_FLASH_MODE_OFF,
+ CV_CAP_ANDROID_FLASH_MODE_ON,
+ CV_CAP_ANDROID_FLASH_MODE_RED_EYE,
+ CV_CAP_ANDROID_FLASH_MODE_TORCH
+};
+
+// supported Android camera focus modes
+enum
+{
+ CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0,
+ CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE,
+ CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO,
+ CV_CAP_ANDROID_FOCUS_MODE_EDOF,
+ CV_CAP_ANDROID_FOCUS_MODE_FIXED,
+ CV_CAP_ANDROID_FOCUS_MODE_INFINITY,
+ CV_CAP_ANDROID_FOCUS_MODE_MACRO
+};
+
+// supported Android camera white balance modes
+enum
+{
+ CV_CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
+ CV_CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT,
+ CV_CAP_ANDROID_WHITE_BALANCE_DAYLIGHT,
+ CV_CAP_ANDROID_WHITE_BALANCE_FLUORESCENT,
+ CV_CAP_ANDROID_WHITE_BALANCE_INCANDESCENT,
+ CV_CAP_ANDROID_WHITE_BALANCE_SHADE,
+ CV_CAP_ANDROID_WHITE_BALANCE_TWILIGHT,
+ CV_CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT
+};
+
+// supported Android camera antibanding modes
+enum
+{
+ CV_CAP_ANDROID_ANTIBANDING_50HZ = 0,
+ CV_CAP_ANDROID_ANTIBANDING_60HZ,
+ CV_CAP_ANDROID_ANTIBANDING_AUTO,
+ CV_CAP_ANDROID_ANTIBANDING_OFF
+};
+
+enum
+{
+ CV_CAP_INTELPERC_DEPTH_MAP = 0, // Each pixel is a 16-bit integer. The value indicates the distance from an object to the camera's XY plane or the Cartesian depth.
+ CV_CAP_INTELPERC_UVDEPTH_MAP = 1, // Each pixel contains two 32-bit floating point values in the range of 0-1, representing the mapping of depth coordinates to the color coordinates.
+ CV_CAP_INTELPERC_IR_MAP = 2, // Each pixel is a 16-bit integer. The value indicates the intensity of the reflected laser beam.
+ CV_CAP_INTELPERC_IMAGE = 3
+};
+
+/* retrieve or set capture properties */
+CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
+CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
+
+// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
+CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
+
+/* "black box" video file writer structure */
+typedef struct CvVideoWriter CvVideoWriter;
+
+#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
+
+CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
+{
+ return CV_FOURCC_MACRO(c1, c2, c3, c4);
+}
+
+#define CV_FOURCC_PROMPT -1 /* Open Codec Selection Dialog (Windows only) */
+#define CV_FOURCC_DEFAULT CV_FOURCC('I', 'Y', 'U', 'V') /* Use default codec for specified filename (Linux only) */
+
+/* initialize video file writer */
+CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc,
+ double fps, CvSize frame_size,
+ int is_color CV_DEFAULT(1));
+
+//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename,
+// int is_color CV_DEFAULT(1));
+
+/* write frame to video file */
+CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
+
+/* close video file writer */
+CVAPI(void) cvReleaseVideoWriter( CvVideoWriter** writer );
+
+/****************************************************************************************\
+* Obsolete functions/synonyms *
+\****************************************************************************************/
+
+#define cvCaptureFromFile cvCreateFileCapture
+#define cvCaptureFromCAM cvCreateCameraCapture
+#define cvCaptureFromAVI cvCaptureFromFile
+#define cvCreateAVIWriter cvCreateVideoWriter
+#define cvWriteToAVI cvWriteFrame
+#define cvAddSearchPath(path)
+#define cvvInitSystem cvInitSystem
+#define cvvNamedWindow cvNamedWindow
+#define cvvShowImage cvShowImage
+#define cvvResizeWindow cvResizeWindow
+#define cvvDestroyWindow cvDestroyWindow
+#define cvvCreateTrackbar cvCreateTrackbar
+#define cvvLoadImage(name) cvLoadImage((name),1)
+#define cvvSaveImage cvSaveImage
+#define cvvAddSearchPath cvAddSearchPath
+#define cvvWaitKey(name) cvWaitKey(0)
+#define cvvWaitKeyEx(name,delay) cvWaitKey(delay)
+#define cvvConvertImage cvConvertImage
+#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE
+#define set_preprocess_func cvSetPreprocessFuncWin32
+#define set_postprocess_func cvSetPostprocessFuncWin32
+
+#if defined WIN32 || defined _WIN32
+
+CVAPI(void) cvSetPreprocessFuncWin32_(const void* callback);
+CVAPI(void) cvSetPostprocessFuncWin32_(const void* callback);
+#define cvSetPreprocessFuncWin32(callback) cvSetPreprocessFuncWin32_((const void*)(callback))
+#define cvSetPostprocessFuncWin32(callback) cvSetPostprocessFuncWin32_((const void*)(callback))
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h
new file mode 100644
index 00000000..a7f0395d
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/highgui/ios.h
@@ -0,0 +1,49 @@
+
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/core/core.hpp"
+#import "opencv2/highgui/cap_ios.h"
+
+UIImage* MatToUIImage(const cv::Mat& image);
+void UIImageToMat(const UIImage* image,
+ cv::Mat& m, bool alphaExist = false);
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp
new file mode 100644
index 00000000..112f7232
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/imgproc/imgproc.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp
new file mode 100644
index 00000000..aa6a5f6c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc.hpp
@@ -0,0 +1,1299 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_IMGPROC_HPP__
+#define __OPENCV_IMGPROC_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/types_c.h"
+
+#ifdef __cplusplus
+
+/*! \namespace cv
+ Namespace where all the C++ OpenCV functionality resides
+ */
+namespace cv
+{
+
+//! various border interpolation methods
+enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT,
+ BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP,
+ BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101,
+ BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT,
+ BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 };
+
+//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p.
+CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType );
+
+/*!
+ The Base Class for 1D or Row-wise Filters
+
+ This is the base class for linear or non-linear filters that process 1D data.
+ In particular, such filters are used for the "horizontal" filtering parts in separable filters.
+
+ Several functions in OpenCV return Ptr<BaseRowFilter> for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+*/
+class CV_EXPORTS BaseRowFilter
+{
+public:
+ //! the default constructor
+ BaseRowFilter();
+ //! the destructor
+ virtual ~BaseRowFilter();
+ //! the filtering operator. Must be overridden in the derived classes. The horizontal border interpolation is done outside of the class.
+ virtual void operator()(const uchar* src, uchar* dst,
+ int width, int cn) = 0;
+ int ksize, anchor;
+};
+
+
+/*!
+ The Base Class for Column-wise Filters
+
+ This is the base class for linear or non-linear filters that process columns of 2D arrays.
+ Such filters are used for the "vertical" filtering parts in separable filters.
+
+ Several functions in OpenCV return Ptr<BaseColumnFilter> for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+
+ Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information,
+ i.e. box filter keeps the sliding sum of elements. To reset the state BaseColumnFilter::reset()
+ must be called (e.g. the method is called by cv::FilterEngine)
+ */
+class CV_EXPORTS BaseColumnFilter
+{
+public:
+ //! the default constructor
+ BaseColumnFilter();
+ //! the destructor
+ virtual ~BaseColumnFilter();
+ //! the filtering operator. Must be overridden in the derived classes. The vertical border interpolation is done outside of the class.
+ virtual void operator()(const uchar** src, uchar* dst, int dststep,
+ int dstcount, int width) = 0;
+ //! resets the internal buffers, if any
+ virtual void reset();
+ int ksize, anchor;
+};
+
+/*!
+ The Base Class for Non-Separable 2D Filters.
+
+ This is the base class for linear or non-linear 2D filters.
+
+ Several functions in OpenCV return Ptr<BaseFilter> for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+
+ Similar to cv::BaseColumnFilter, the class may have some context information,
+ that should be reset using BaseFilter::reset() method before processing the new array.
+*/
+class CV_EXPORTS BaseFilter
+{
+public:
+ //! the default constructor
+ BaseFilter();
+ //! the destructor
+ virtual ~BaseFilter();
+ //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class.
+ virtual void operator()(const uchar** src, uchar* dst, int dststep,
+ int dstcount, int width, int cn) = 0;
+ //! resets the internal buffers, if any
+ virtual void reset();
+ Size ksize;
+ Point anchor;
+};
+
+/*!
+ The Main Class for Image Filtering.
+
+ The class can be used to apply an arbitrary filtering operation to an image.
+ It contains all the necessary intermediate buffers, it computes extrapolated values
+ of the "virtual" pixels outside of the image etc.
+ Pointers to the initialized cv::FilterEngine instances
+ are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(),
+ cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(),
+ cv::createBoxFilter() and cv::createMorphologyFilter().
+
+ Using the class you can process large images by parts and build complex pipelines
+ that include filtering as some of the stages. If all you need is to apply some pre-defined
+ filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc.
+ functions that create FilterEngine internally.
+
+ Here is the example on how to use the class to implement Laplacian operator, which is the sum of
+ second-order derivatives. More complex variant for different types is implemented in cv::Laplacian().
+
+ \code
+ void laplace_f(const Mat& src, Mat& dst)
+ {
+ CV_Assert( src.type() == CV_32F );
+ // make sure the destination array has the proper size and type
+ dst.create(src.size(), src.type());
+
+ // get the derivative and smooth kernels for d2I/dx2.
+ // for d2I/dy2 we could use the same kernels, just swapped
+ Mat kd, ks;
+ getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );
+
+ // let's process 10 source rows at once
+ int DELTA = std::min(10, src.rows);
+ Ptr<FilterEngine> Fxx = createSeparableLinearFilter(src.type(),
+ dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
+ Ptr<FilterEngine> Fyy = createSeparableLinearFilter(src.type(),
+ dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );
+
+ int y = Fxx->start(src), dsty = 0, dy = 0;
+ Fyy->start(src);
+ const uchar* sptr = src.data + y*src.step;
+
+ // allocate the buffers for the spatial image derivatives;
+ // the buffers need to have more than DELTA rows, because at the
+ // last iteration the output may take max(kd.rows-1,ks.rows-1)
+ // rows more than the input.
+ Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() );
+ Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() );
+
+ // inside the loop we always pass DELTA rows to the filter
+ // (note that the "proceed" method takes care of possibe overflow, since
+ // it was given the actual image height in the "start" method)
+ // on output we can get:
+ // * < DELTA rows (the initial buffer accumulation stage)
+ // * = DELTA rows (settled state in the middle)
+ // * > DELTA rows (then the input image is over, but we generate
+ // "virtual" rows using the border mode and filter them)
+ // this variable number of output rows is dy.
+ // dsty is the current output row.
+ // sptr is the pointer to the first input row in the portion to process
+ for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy )
+ {
+ Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step );
+ dy = Fyy->proceed( sptr, (int)src.step, DELTA, d2y.data, (int)Iyy.step );
+ if( dy > 0 )
+ {
+ Mat dstripe = dst.rowRange(dsty, dsty + dy);
+ add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe);
+ }
+ }
+ }
+ \endcode
+*/
+class CV_EXPORTS FilterEngine
+{
+public:
+ //! the default constructor
+ FilterEngine();
+ //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty.
+ FilterEngine(const Ptr<BaseFilter>& _filter2D,
+ const Ptr<BaseRowFilter>& _rowFilter,
+ const Ptr<BaseColumnFilter>& _columnFilter,
+ int srcType, int dstType, int bufType,
+ int _rowBorderType=BORDER_REPLICATE,
+ int _columnBorderType=-1,
+ const Scalar& _borderValue=Scalar());
+ //! the destructor
+ virtual ~FilterEngine();
+ //! reinitializes the engine. The previously assigned filters are released.
+ void init(const Ptr<BaseFilter>& _filter2D,
+ const Ptr<BaseRowFilter>& _rowFilter,
+ const Ptr<BaseColumnFilter>& _columnFilter,
+ int srcType, int dstType, int bufType,
+ int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,
+ const Scalar& _borderValue=Scalar());
+ //! starts filtering of the specified ROI of an image of size wholeSize. Returns the starting y position within the ROI.
+ virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);
+ //! starts filtering of the specified ROI of the specified image.
+ virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),
+ bool isolated=false, int maxBufRows=-1);
+ //! processes the next srcCount rows of the image; returns the number of produced output rows (may differ from srcCount near the image borders).
+ virtual int proceed(const uchar* src, int srcStep, int srcCount,
+ uchar* dst, int dstStep);
+ //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered.
+ virtual void apply( const Mat& src, Mat& dst,
+ const Rect& srcRoi=Rect(0,0,-1,-1),
+ Point dstOfs=Point(0,0),
+ bool isolated=false);
+ //! returns true if the filter is separable (i.e. no monolithic 2D filter2D has been assigned)
+ bool isSeparable() const { return (const BaseFilter*)filter2D == 0; }
+ //! returns the number of input rows not yet consumed by proceed()
+ int remainingInputRows() const;
+ //! returns the number of output rows not yet produced
+ int remainingOutputRows() const;
+
+ // filter configuration captured by init(): element formats of the
+ // source image, destination image and the intermediate buffer
+ int srcType, dstType, bufType;
+ Size ksize;
+ Point anchor;
+ int maxWidth;
+ // geometry of the current filtering session started by start():
+ // the whole image size and the processed ROI inside it
+ Size wholeSize;
+ Rect roi;
+ int dx1, dx2;
+ // border extrapolation modes for the horizontal and vertical passes
+ int rowBorderType, columnBorderType;
+ // internal buffers and progress counters (implementation details of
+ // the row-ring-buffer filtering scheme; not part of the public contract)
+ vector<int> borderTab;
+ int borderElemSize;
+ vector<uchar> ringBuf;
+ vector<uchar> srcRow;
+ vector<uchar> constBorderValue;
+ vector<uchar> constBorderRow;
+ int bufStep, startY, startY0, endY, rowCount, dstY;
+ vector<uchar*> rows;
+
+ // either filter2D alone, or rowFilter+columnFilter (separable case), is used
+ Ptr<BaseFilter> filter2D;
+ Ptr<BaseRowFilter> rowFilter;
+ Ptr<BaseColumnFilter> columnFilter;
+};
+
+//! type of the kernel
+enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2,
+ KERNEL_SMOOTH=4, KERNEL_INTEGER=8 };
+
+//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients.
+CV_EXPORTS int getKernelType(InputArray kernel, Point anchor);
+
+//! returns the primitive row filter with the specified kernel
+CV_EXPORTS Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType,
+ InputArray kernel, int anchor,
+ int symmetryType);
+
+//! returns the primitive column filter with the specified kernel
+CV_EXPORTS Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType,
+ InputArray kernel, int anchor,
+ int symmetryType, double delta=0,
+ int bits=0);
+
+//! returns 2D filter with the specified kernel
+CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,
+ InputArray kernel,
+ Point anchor=Point(-1,-1),
+ double delta=0, int bits=0);
+
+//! returns the separable linear filter engine
+CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,
+ InputArray rowKernel, InputArray columnKernel,
+ Point anchor=Point(-1,-1), double delta=0,
+ int rowBorderType=BORDER_DEFAULT,
+ int columnBorderType=-1,
+ const Scalar& borderValue=Scalar());
+
+//! returns the non-separable linear filter engine
+CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,
+ InputArray kernel, Point _anchor=Point(-1,-1),
+ double delta=0, int rowBorderType=BORDER_DEFAULT,
+ int columnBorderType=-1, const Scalar& borderValue=Scalar());
+
+//! returns the Gaussian kernel with the specified parameters
+CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F );
+
+//! returns the Gaussian filter engine
+CV_EXPORTS Ptr<FilterEngine> createGaussianFilter( int type, Size ksize,
+ double sigma1, double sigma2=0,
+ int borderType=BORDER_DEFAULT);
+//! initializes kernels of the generalized Sobel operator
+CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky,
+ int dx, int dy, int ksize,
+ bool normalize=false, int ktype=CV_32F );
+//! returns filter engine for the generalized Sobel operator
+CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,
+ int dx, int dy, int ksize,
+ int borderType=BORDER_DEFAULT );
+//! returns horizontal 1D box filter
+CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,
+ int ksize, int anchor=-1);
+//! returns vertical 1D box filter
+CV_EXPORTS Ptr<BaseColumnFilter> getColumnSumFilter( int sumType, int dstType,
+ int ksize, int anchor=-1,
+ double scale=1);
+//! returns box filter engine
+CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksize,
+ Point anchor=Point(-1,-1),
+ bool normalize=true,
+ int borderType=BORDER_DEFAULT);
+
+//! returns the Gabor kernel with the specified parameters
+CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd,
+ double gamma, double psi=CV_PI*0.5, int ktype=CV_64F );
+
+//! type of morphological operation
+enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE,
+ MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE,
+ MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT,
+ MORPH_BLACKHAT=CV_MOP_BLACKHAT, MORPH_HITMISS };
+
+//! returns horizontal 1D morphological filter
+CV_EXPORTS Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1);
+//! returns vertical 1D morphological filter
+CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1);
+//! returns 2D morphological filter
+CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray kernel,
+ Point anchor=Point(-1,-1));
+
+//! Default ("magic") border value for erosion and dilation; per the morphology
+//! contract it is automatically transformed to Scalar::all(-DBL_MAX) for dilation.
+static inline Scalar morphologyDefaultBorderValue()
+{
+    return Scalar::all(DBL_MAX);
+}
+
+//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
+CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray kernel,
+ Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT,
+ int columnBorderType=-1,
+ const Scalar& borderValue=morphologyDefaultBorderValue());
+
+//! shape of the structuring element
+enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 };
+//! returns structuring element of the specified shape and size
+CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1));
+
+template<> CV_EXPORTS void Ptr<IplConvKernel>::delete_obj();
+
+//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode
+CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst,
+ int top, int bottom, int left, int right,
+ int borderType, const Scalar& value=Scalar() );
+
+//! smooths the image using median filter.
+CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );
+//! smooths the image using Gaussian filter.
+CV_EXPORTS_W void GaussianBlur( InputArray src,
+ OutputArray dst, Size ksize,
+ double sigmaX, double sigmaY=0,
+ int borderType=BORDER_DEFAULT );
+//! smooths the image using bilateral filter
+CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,
+ double sigmaColor, double sigmaSpace,
+ int borderType=BORDER_DEFAULT );
+//! smooths the image using adaptive bilateral filter
+CV_EXPORTS_W void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize,
+ double sigmaSpace, double maxSigmaColor = 20.0, Point anchor=Point(-1, -1),
+ int borderType=BORDER_DEFAULT );
+//! smooths the image using the box filter. Each pixel is processed in O(1) time
+CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
+ Size ksize, Point anchor=Point(-1,-1),
+ bool normalize=true,
+ int borderType=BORDER_DEFAULT );
+//! a synonym for normalized box filter
+CV_EXPORTS_W void blur( InputArray src, OutputArray dst,
+ Size ksize, Point anchor=Point(-1,-1),
+ int borderType=BORDER_DEFAULT );
+
+//! applies non-separable 2D linear filter to the image
+CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth,
+ InputArray kernel, Point anchor=Point(-1,-1),
+ double delta=0, int borderType=BORDER_DEFAULT );
+
+//! applies separable 2D linear filter to the image
+CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth,
+ InputArray kernelX, InputArray kernelY,
+ Point anchor=Point(-1,-1),
+ double delta=0, int borderType=BORDER_DEFAULT );
+
+//! applies generalized Sobel operator to the image
+CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth,
+ int dx, int dy, int ksize=3,
+ double scale=1, double delta=0,
+ int borderType=BORDER_DEFAULT );
+
+//! applies the vertical or horizontal Scharr operator to the image
+CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth,
+ int dx, int dy, double scale=1, double delta=0,
+ int borderType=BORDER_DEFAULT );
+
+//! applies Laplacian operator to the image
+CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth,
+ int ksize=1, double scale=1, double delta=0,
+ int borderType=BORDER_DEFAULT );
+
+//! applies Canny edge detector and produces the edge map.
+CV_EXPORTS_W void Canny( InputArray image, OutputArray edges,
+ double threshold1, double threshold2,
+ int apertureSize=3, bool L2gradient=false );
+
+//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria
+CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst,
+ int blockSize, int ksize=3,
+ int borderType=BORDER_DEFAULT );
+
+//! computes Harris cornerness criteria at each image pixel
+CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
+ int ksize, double k,
+ int borderType=BORDER_DEFAULT );
+
+//! low-level function for computing eigenvalues and eigenvectors of 2x2 matrices
+CV_EXPORTS void eigen2x2( const float* a, float* e, int n );
+
+//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix.
+CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst,
+ int blockSize, int ksize,
+ int borderType=BORDER_DEFAULT );
+
+//! computes another complex cornerness criteria at each pixel
+CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize,
+ int borderType=BORDER_DEFAULT );
+
+//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria
+CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners,
+ Size winSize, Size zeroZone,
+ TermCriteria criteria );
+
+//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima
+CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners,
+ int maxCorners, double qualityLevel, double minDistance,
+ InputArray mask=noArray(), int blockSize=3,
+ bool useHarrisDetector=false, double k=0.04 );
+
+//! finds lines in the black-n-white image using the standard or pyramid Hough transform
+CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines,
+ double rho, double theta, int threshold,
+ double srn=0, double stn=0 );
+
+//! finds line segments in the black-n-white image using probabilistic Hough transform
+CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines,
+ double rho, double theta, int threshold,
+ double minLineLength=0, double maxLineGap=0 );
+
+//! finds circles in the grayscale image using 2+1 gradient Hough transform
+CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles,
+ int method, double dp, double minDist,
+ double param1=100, double param2=100,
+ int minRadius=0, int maxRadius=0 );
+
+enum
+{
+ GHT_POSITION = 0,
+ GHT_SCALE = 1,
+ GHT_ROTATION = 2
+};
+
+//! finds arbitrary template in the grayscale image using Generalized Hough Transform
+//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.
+//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.
+class CV_EXPORTS GeneralizedHough : public Algorithm
+{
+public:
+ //! creates a concrete implementation; method is a combination of the GHT_* flags declared above
+ static Ptr<GeneralizedHough> create(int method);
+
+ virtual ~GeneralizedHough();
+
+ //! set template to search (edges are extracted internally with the Canny detector)
+ void setTemplate(InputArray templ, int cannyThreshold = 100, Point templCenter = Point(-1, -1));
+ //! set template by its precomputed edge map and x/y derivative images
+ void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1));
+
+ //! find template on image
+ void detect(InputArray image, OutputArray positions, OutputArray votes = cv::noArray(), int cannyThreshold = 100);
+ //! find template given precomputed edge map and x/y derivative images
+ void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = cv::noArray());
+
+ //! releases internal state (delegates to releaseImpl)
+ void release();
+
+protected:
+ // hooks implemented by the concrete method-specific subclasses
+ virtual void setTemplateImpl(const Mat& edges, const Mat& dx, const Mat& dy, Point templCenter) = 0;
+ virtual void detectImpl(const Mat& edges, const Mat& dx, const Mat& dy, OutputArray positions, OutputArray votes) = 0;
+ virtual void releaseImpl() = 0;
+
+private:
+ // cached edge map and derivatives of the current template
+ Mat edges_, dx_, dy_;
+};
+
+//! erodes the image (applies the local minimum operator)
+CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel,
+ Point anchor=Point(-1,-1), int iterations=1,
+ int borderType=BORDER_CONSTANT,
+ const Scalar& borderValue=morphologyDefaultBorderValue() );
+
+//! dilates the image (applies the local maximum operator)
+CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel,
+ Point anchor=Point(-1,-1), int iterations=1,
+ int borderType=BORDER_CONSTANT,
+ const Scalar& borderValue=morphologyDefaultBorderValue() );
+
+//! applies an advanced morphological operation to the image
+CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst,
+ int op, InputArray kernel,
+ Point anchor=Point(-1,-1), int iterations=1,
+ int borderType=BORDER_CONSTANT,
+ const Scalar& borderValue=morphologyDefaultBorderValue() );
+
+//! interpolation algorithm
+enum
+{
+ INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation
+ INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation
+ INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation
+ INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation
+ INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood
+ INTER_MAX=7,
+ WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP
+};
+
+//! resizes the image
+CV_EXPORTS_W void resize( InputArray src, OutputArray dst,
+ Size dsize, double fx=0, double fy=0,
+ int interpolation=INTER_LINEAR );
+
+//! warps the image using affine transformation
+CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst,
+ InputArray M, Size dsize,
+ int flags=INTER_LINEAR,
+ int borderMode=BORDER_CONSTANT,
+ const Scalar& borderValue=Scalar());
+
+//! warps the image using perspective transformation
+CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst,
+ InputArray M, Size dsize,
+ int flags=INTER_LINEAR,
+ int borderMode=BORDER_CONSTANT,
+ const Scalar& borderValue=Scalar());
+
+enum
+{
+ INTER_BITS=5, INTER_BITS2=INTER_BITS*2,
+ INTER_TAB_SIZE=(1<<INTER_BITS),
+ INTER_TAB_SIZE2=INTER_TAB_SIZE*INTER_TAB_SIZE
+};
+
+//! warps the image using the precomputed maps. The maps are stored in either floating-point or integer fixed-point format
+CV_EXPORTS_W void remap( InputArray src, OutputArray dst,
+ InputArray map1, InputArray map2,
+ int interpolation, int borderMode=BORDER_CONSTANT,
+ const Scalar& borderValue=Scalar());
+
+//! converts maps for remap from floating-point to fixed-point format or backwards
+CV_EXPORTS_W void convertMaps( InputArray map1, InputArray map2,
+ OutputArray dstmap1, OutputArray dstmap2,
+ int dstmap1type, bool nninterpolation=false );
+
+//! returns 2x3 affine transformation matrix for the planar rotation.
+CV_EXPORTS_W Mat getRotationMatrix2D( Point2f center, double angle, double scale );
+//! returns 3x3 perspective transformation for the corresponding 4 point pairs.
+CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] );
+//! returns 2x3 affine transformation for the corresponding 3 point pairs.
+CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] );
+//! computes 2x3 affine transformation matrix that is inverse to the specified 2x3 affine transformation.
+CV_EXPORTS_W void invertAffineTransform( InputArray M, OutputArray iM );
+
+CV_EXPORTS_W Mat getPerspectiveTransform( InputArray src, InputArray dst );
+CV_EXPORTS_W Mat getAffineTransform( InputArray src, InputArray dst );
+
+//! extracts rectangle from the image at sub-pixel location
+CV_EXPORTS_W void getRectSubPix( InputArray image, Size patchSize,
+ Point2f center, OutputArray patch, int patchType=-1 );
+
+//! computes the integral image
+CV_EXPORTS_W void integral( InputArray src, OutputArray sum, int sdepth=-1 );
+
+//! computes the integral image and integral for the squared image
+CV_EXPORTS_AS(integral2) void integral( InputArray src, OutputArray sum,
+ OutputArray sqsum, int sdepth=-1 );
+//! computes the integral image, integral for the squared image and the tilted integral image
+CV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum,
+ OutputArray sqsum, OutputArray tilted,
+ int sdepth=-1 );
+
+//! adds image to the accumulator (dst += src). Unlike cv::add, dst and src can have different types.
+CV_EXPORTS_W void accumulate( InputArray src, InputOutputArray dst,
+ InputArray mask=noArray() );
+//! adds squared src image to the accumulator (dst += src*src).
+CV_EXPORTS_W void accumulateSquare( InputArray src, InputOutputArray dst,
+ InputArray mask=noArray() );
+//! adds product of the 2 images to the accumulator (dst += src1*src2).
+CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
+ InputOutputArray dst, InputArray mask=noArray() );
+//! updates the running average (dst = dst*(1-alpha) + src*alpha)
+CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
+ double alpha, InputArray mask=noArray() );
+
+//! computes PSNR image/video quality metric
+CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
+
+CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2,
+ InputArray window = noArray());
+CV_EXPORTS_W Point2d phaseCorrelateRes(InputArray src1, InputArray src2,
+ InputArray window, CV_OUT double* response = 0);
+CV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type);
+
+//! type of the threshold operation
+enum { THRESH_BINARY=CV_THRESH_BINARY, THRESH_BINARY_INV=CV_THRESH_BINARY_INV,
+ THRESH_TRUNC=CV_THRESH_TRUNC, THRESH_TOZERO=CV_THRESH_TOZERO,
+ THRESH_TOZERO_INV=CV_THRESH_TOZERO_INV, THRESH_MASK=CV_THRESH_MASK,
+ THRESH_OTSU=CV_THRESH_OTSU };
+
+//! applies fixed threshold to the image
+CV_EXPORTS_W double threshold( InputArray src, OutputArray dst,
+ double thresh, double maxval, int type );
+
+//! adaptive threshold algorithm
+enum { ADAPTIVE_THRESH_MEAN_C=0, ADAPTIVE_THRESH_GAUSSIAN_C=1 };
+
+//! applies variable (adaptive) threshold to the image
+CV_EXPORTS_W void adaptiveThreshold( InputArray src, OutputArray dst,
+ double maxValue, int adaptiveMethod,
+ int thresholdType, int blockSize, double C );
+
+//! smooths and downsamples the image
+CV_EXPORTS_W void pyrDown( InputArray src, OutputArray dst,
+ const Size& dstsize=Size(), int borderType=BORDER_DEFAULT );
+//! upsamples and smoothes the image
+CV_EXPORTS_W void pyrUp( InputArray src, OutputArray dst,
+ const Size& dstsize=Size(), int borderType=BORDER_DEFAULT );
+
+//! builds the gaussian pyramid using pyrDown() as a basic operation
+CV_EXPORTS void buildPyramid( InputArray src, OutputArrayOfArrays dst,
+ int maxlevel, int borderType=BORDER_DEFAULT );
+
+//! corrects lens distortion for the given camera matrix and distortion coefficients
+CV_EXPORTS_W void undistort( InputArray src, OutputArray dst,
+ InputArray cameraMatrix,
+ InputArray distCoeffs,
+ InputArray newCameraMatrix=noArray() );
+
+//! initializes maps for cv::remap() to correct lens distortion and optionally rectify the image
+CV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs,
+ InputArray R, InputArray newCameraMatrix,
+ Size size, int m1type, OutputArray map1, OutputArray map2 );
+
+enum
+{
+ PROJ_SPHERICAL_ORTHO = 0,
+ PROJ_SPHERICAL_EQRECT = 1
+};
+
+//! initializes maps for cv::remap() for wide-angle
+CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs,
+ Size imageSize, int destImageWidth,
+ int m1type, OutputArray map1, OutputArray map2,
+ int projType=PROJ_SPHERICAL_EQRECT, double alpha=0);
+
+//! returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPrincipalPoint=true)
+CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize=Size(),
+ bool centerPrincipalPoint=false );
+
+//! returns points' coordinates after lens distortion correction
+CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ InputArray R=noArray(), InputArray P=noArray());
+
+template<> CV_EXPORTS void Ptr<CvHistogram>::delete_obj();
+
+//! computes the joint dense histogram for a set of images.
+CV_EXPORTS void calcHist( const Mat* images, int nimages,
+ const int* channels, InputArray mask,
+ OutputArray hist, int dims, const int* histSize,
+ const float** ranges, bool uniform=true, bool accumulate=false );
+
+//! computes the joint sparse histogram for a set of images.
+CV_EXPORTS void calcHist( const Mat* images, int nimages,
+ const int* channels, InputArray mask,
+ SparseMat& hist, int dims,
+ const int* histSize, const float** ranges,
+ bool uniform=true, bool accumulate=false );
+
+CV_EXPORTS_W void calcHist( InputArrayOfArrays images,
+ const vector<int>& channels,
+ InputArray mask, OutputArray hist,
+ const vector<int>& histSize,
+ const vector<float>& ranges,
+ bool accumulate=false );
+
+//! computes back projection for the set of images
+CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
+ const int* channels, InputArray hist,
+ OutputArray backProject, const float** ranges,
+ double scale=1, bool uniform=true );
+
+//! computes back projection for the set of images
+CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
+ const int* channels, const SparseMat& hist,
+ OutputArray backProject, const float** ranges,
+ double scale=1, bool uniform=true );
+
+CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector<int>& channels,
+ InputArray hist, OutputArray dst,
+ const vector<float>& ranges,
+ double scale );
+
+/*CV_EXPORTS void calcBackProjectPatch( const Mat* images, int nimages, const int* channels,
+ InputArray hist, OutputArray dst, Size patchSize,
+ int method, double factor=1 );
+
+CV_EXPORTS_W void calcBackProjectPatch( InputArrayOfArrays images, const vector<int>& channels,
+ InputArray hist, OutputArray dst, Size patchSize,
+ int method, double factor=1 );*/
+
+//! compares two histograms stored in dense arrays
+CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method );
+
+//! compares two histograms stored in sparse arrays
+CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method );
+
+//! normalizes the grayscale image brightness and contrast by normalizing its histogram
+CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );
+
+class CV_EXPORTS_W CLAHE : public Algorithm
+{
+public:
+ //! equalizes the histogram of src into dst using the current CLAHE parameters
+ CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0;
+
+ //! sets/gets the contrast-limiting clip threshold
+ CV_WRAP virtual void setClipLimit(double clipLimit) = 0;
+ CV_WRAP virtual double getClipLimit() const = 0;
+
+ //! sets/gets the grid size, i.e. the number of tiles the image is divided into
+ CV_WRAP virtual void setTilesGridSize(Size tileGridSize) = 0;
+ CV_WRAP virtual Size getTilesGridSize() const = 0;
+
+ //! frees internal buffers (implementation-dependent) kept between apply() calls
+ CV_WRAP virtual void collectGarbage() = 0;
+};
+CV_EXPORTS_W Ptr<CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
+
+CV_EXPORTS float EMD( InputArray signature1, InputArray signature2,
+ int distType, InputArray cost=noArray(),
+ float* lowerBound=0, OutputArray flow=noArray() );
+
+//! segments the image using watershed algorithm
+CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers );
+
+//! filters image using meanshift algorithm
+CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst,
+ double sp, double sr, int maxLevel=1,
+ TermCriteria termcrit=TermCriteria(
+ TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) );
+
+//! class of the pixel in GrabCut algorithm
+enum
+{
+ GC_BGD = 0, //!< background
+ GC_FGD = 1, //!< foreground
+ GC_PR_BGD = 2, //!< most probably background
+ GC_PR_FGD = 3 //!< most probably foreground
+};
+
+//! GrabCut algorithm flags
+enum
+{
+ GC_INIT_WITH_RECT = 0,
+ GC_INIT_WITH_MASK = 1,
+ GC_EVAL = 2
+};
+
+//! segments the image using GrabCut algorithm
+CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
+ InputOutputArray bgdModel, InputOutputArray fgdModel,
+ int iterCount, int mode = GC_EVAL );
+
+enum
+{
+ DIST_LABEL_CCOMP = 0,
+ DIST_LABEL_PIXEL = 1
+};
+
+//! builds the discrete Voronoi diagram
+CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst,
+ OutputArray labels, int distanceType, int maskSize,
+ int labelType=DIST_LABEL_CCOMP );
+
+//! computes the distance transform map
+CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst,
+ int distanceType, int maskSize );
+
+enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 };
+
+//! fills the semi-uniform image region starting from the specified seed point
+CV_EXPORTS int floodFill( InputOutputArray image,
+ Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0,
+ Scalar loDiff=Scalar(), Scalar upDiff=Scalar(),
+ int flags=4 );
+
+//! fills the semi-uniform image region and/or the mask starting from the specified seed point
+CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask,
+ Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0,
+ Scalar loDiff=Scalar(), Scalar upDiff=Scalar(),
+ int flags=4 );
+
+
+enum
+{
+ COLOR_BGR2BGRA =0,
+ COLOR_RGB2RGBA =COLOR_BGR2BGRA,
+
+ COLOR_BGRA2BGR =1,
+ COLOR_RGBA2RGB =COLOR_BGRA2BGR,
+
+ COLOR_BGR2RGBA =2,
+ COLOR_RGB2BGRA =COLOR_BGR2RGBA,
+
+ COLOR_RGBA2BGR =3,
+ COLOR_BGRA2RGB =COLOR_RGBA2BGR,
+
+ COLOR_BGR2RGB =4,
+ COLOR_RGB2BGR =COLOR_BGR2RGB,
+
+ COLOR_BGRA2RGBA =5,
+ COLOR_RGBA2BGRA =COLOR_BGRA2RGBA,
+
+ COLOR_BGR2GRAY =6,
+ COLOR_RGB2GRAY =7,
+ COLOR_GRAY2BGR =8,
+ COLOR_GRAY2RGB =COLOR_GRAY2BGR,
+ COLOR_GRAY2BGRA =9,
+ COLOR_GRAY2RGBA =COLOR_GRAY2BGRA,
+ COLOR_BGRA2GRAY =10,
+ COLOR_RGBA2GRAY =11,
+
+ COLOR_BGR2BGR565 =12,
+ COLOR_RGB2BGR565 =13,
+ COLOR_BGR5652BGR =14,
+ COLOR_BGR5652RGB =15,
+ COLOR_BGRA2BGR565 =16,
+ COLOR_RGBA2BGR565 =17,
+ COLOR_BGR5652BGRA =18,
+ COLOR_BGR5652RGBA =19,
+
+ COLOR_GRAY2BGR565 =20,
+ COLOR_BGR5652GRAY =21,
+
+ COLOR_BGR2BGR555 =22,
+ COLOR_RGB2BGR555 =23,
+ COLOR_BGR5552BGR =24,
+ COLOR_BGR5552RGB =25,
+ COLOR_BGRA2BGR555 =26,
+ COLOR_RGBA2BGR555 =27,
+ COLOR_BGR5552BGRA =28,
+ COLOR_BGR5552RGBA =29,
+
+ COLOR_GRAY2BGR555 =30,
+ COLOR_BGR5552GRAY =31,
+
+ COLOR_BGR2XYZ =32,
+ COLOR_RGB2XYZ =33,
+ COLOR_XYZ2BGR =34,
+ COLOR_XYZ2RGB =35,
+
+ COLOR_BGR2YCrCb =36,
+ COLOR_RGB2YCrCb =37,
+ COLOR_YCrCb2BGR =38,
+ COLOR_YCrCb2RGB =39,
+
+ COLOR_BGR2HSV =40,
+ COLOR_RGB2HSV =41,
+
+ COLOR_BGR2Lab =44,
+ COLOR_RGB2Lab =45,
+
+ COLOR_BayerBG2BGR =46,
+ COLOR_BayerGB2BGR =47,
+ COLOR_BayerRG2BGR =48,
+ COLOR_BayerGR2BGR =49,
+
+ COLOR_BayerBG2RGB =COLOR_BayerRG2BGR,
+ COLOR_BayerGB2RGB =COLOR_BayerGR2BGR,
+ COLOR_BayerRG2RGB =COLOR_BayerBG2BGR,
+ COLOR_BayerGR2RGB =COLOR_BayerGB2BGR,
+
+ COLOR_BGR2Luv =50,
+ COLOR_RGB2Luv =51,
+ COLOR_BGR2HLS =52,
+ COLOR_RGB2HLS =53,
+
+ COLOR_HSV2BGR =54,
+ COLOR_HSV2RGB =55,
+
+ COLOR_Lab2BGR =56,
+ COLOR_Lab2RGB =57,
+ COLOR_Luv2BGR =58,
+ COLOR_Luv2RGB =59,
+ COLOR_HLS2BGR =60,
+ COLOR_HLS2RGB =61,
+
+ COLOR_BayerBG2BGR_VNG =62,
+ COLOR_BayerGB2BGR_VNG =63,
+ COLOR_BayerRG2BGR_VNG =64,
+ COLOR_BayerGR2BGR_VNG =65,
+
+ COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG,
+ COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG,
+ COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG,
+ COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG,
+
+ COLOR_BGR2HSV_FULL = 66,
+ COLOR_RGB2HSV_FULL = 67,
+ COLOR_BGR2HLS_FULL = 68,
+ COLOR_RGB2HLS_FULL = 69,
+
+ COLOR_HSV2BGR_FULL = 70,
+ COLOR_HSV2RGB_FULL = 71,
+ COLOR_HLS2BGR_FULL = 72,
+ COLOR_HLS2RGB_FULL = 73,
+
+ COLOR_LBGR2Lab = 74,
+ COLOR_LRGB2Lab = 75,
+ COLOR_LBGR2Luv = 76,
+ COLOR_LRGB2Luv = 77,
+
+ COLOR_Lab2LBGR = 78,
+ COLOR_Lab2LRGB = 79,
+ COLOR_Luv2LBGR = 80,
+ COLOR_Luv2LRGB = 81,
+
+ COLOR_BGR2YUV = 82,
+ COLOR_RGB2YUV = 83,
+ COLOR_YUV2BGR = 84,
+ COLOR_YUV2RGB = 85,
+
+ COLOR_BayerBG2GRAY = 86,
+ COLOR_BayerGB2GRAY = 87,
+ COLOR_BayerRG2GRAY = 88,
+ COLOR_BayerGR2GRAY = 89,
+
+ //YUV 4:2:0 formats family
+ COLOR_YUV2RGB_NV12 = 90,
+ COLOR_YUV2BGR_NV12 = 91,
+ COLOR_YUV2RGB_NV21 = 92,
+ COLOR_YUV2BGR_NV21 = 93,
+ COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
+ COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21,
+
+ COLOR_YUV2RGBA_NV12 = 94,
+ COLOR_YUV2BGRA_NV12 = 95,
+ COLOR_YUV2RGBA_NV21 = 96,
+ COLOR_YUV2BGRA_NV21 = 97,
+ COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
+ COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
+
+ COLOR_YUV2RGB_YV12 = 98,
+ COLOR_YUV2BGR_YV12 = 99,
+ COLOR_YUV2RGB_IYUV = 100,
+ COLOR_YUV2BGR_IYUV = 101,
+ COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV,
+ COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
+ COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
+ COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
+
+ COLOR_YUV2RGBA_YV12 = 102,
+ COLOR_YUV2BGRA_YV12 = 103,
+ COLOR_YUV2RGBA_IYUV = 104,
+ COLOR_YUV2BGRA_IYUV = 105,
+ COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,
+ COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
+ COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
+ COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
+
+ COLOR_YUV2GRAY_420 = 106,
+ COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
+ COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
+ COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
+
+ //YUV 4:2:2 formats family
+ COLOR_YUV2RGB_UYVY = 107,
+ COLOR_YUV2BGR_UYVY = 108,
+ //COLOR_YUV2RGB_VYUY = 109,
+ //COLOR_YUV2BGR_VYUY = 110,
+ COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,
+ COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
+ COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
+ COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
+
+ COLOR_YUV2RGBA_UYVY = 111,
+ COLOR_YUV2BGRA_UYVY = 112,
+ //COLOR_YUV2RGBA_VYUY = 113,
+ //COLOR_YUV2BGRA_VYUY = 114,
+ COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,
+ COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
+ COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
+ COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
+
+ COLOR_YUV2RGB_YUY2 = 115,
+ COLOR_YUV2BGR_YUY2 = 116,
+ COLOR_YUV2RGB_YVYU = 117,
+ COLOR_YUV2BGR_YVYU = 118,
+ COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,
+ COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
+ COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
+ COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
+
+ COLOR_YUV2RGBA_YUY2 = 119,
+ COLOR_YUV2BGRA_YUY2 = 120,
+ COLOR_YUV2RGBA_YVYU = 121,
+ COLOR_YUV2BGRA_YVYU = 122,
+ COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,
+ COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
+ COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
+ COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
+
+ COLOR_YUV2GRAY_UYVY = 123,
+ COLOR_YUV2GRAY_YUY2 = 124,
+ //COLOR_YUV2GRAY_VYUY = COLOR_YUV2GRAY_UYVY,
+ COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,
+ COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,
+ COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
+ COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
+ COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
+
+ // alpha premultiplication
+ COLOR_RGBA2mRGBA = 125,
+ COLOR_mRGBA2RGBA = 126,
+
+ COLOR_RGB2YUV_I420 = 127,
+ COLOR_BGR2YUV_I420 = 128,
+ COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420,
+ COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420,
+
+ COLOR_RGBA2YUV_I420 = 129,
+ COLOR_BGRA2YUV_I420 = 130,
+ COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,
+ COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,
+ COLOR_RGB2YUV_YV12 = 131,
+ COLOR_BGR2YUV_YV12 = 132,
+ COLOR_RGBA2YUV_YV12 = 133,
+ COLOR_BGRA2YUV_YV12 = 134,
+
+ COLOR_COLORCVT_MAX = 135
+};
+
+
+//! converts image from one color space to another
+CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 );
+
+//! raster image moments: spatial (mXY), central (muXY) and normalized central (nuXY)
+class CV_EXPORTS_W_MAP Moments
+{
+public:
+ //! the default constructor
+ Moments();
+ //! the full constructor (takes the ten spatial moments up to 3rd order)
+ Moments(double m00, double m10, double m01, double m20, double m11,
+ double m02, double m30, double m21, double m12, double m03 );
+ //! the conversion from CvMoments (legacy C-API struct)
+ Moments( const CvMoments& moments );
+ //! the conversion to CvMoments (legacy C-API struct)
+ operator CvMoments() const;
+
+ //! spatial moments
+ CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
+ //! central moments
+ CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03;
+ //! central normalized moments
+ CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03;
+};
+
+//! computes moments of the rasterized shape or a vector of points
+CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false );
+
+//! computes 7 Hu invariants from the moments
+CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] );
+CV_EXPORTS_W void HuMoments( const Moments& m, CV_OUT OutputArray hu );
+
+//! type of the template matching operation
+enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 };
+
+//! computes the proximity map for the raster template and the image where the template is searched for
+CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ,
+ OutputArray result, int method );
+
+//! mode of the contour retrieval algorithm (aliases of the CV_RETR_* C constants)
+enum
+{
+ RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours
+ RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information
+ RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested)
+ RETR_TREE=CV_RETR_TREE, //!< retrieve all the contours and the whole hierarchy
+ RETR_FLOODFILL=CV_RETR_FLOODFILL //!< flood-fill based retrieval mode — semantics defined by CV_RETR_FLOODFILL upstream
+};
+
+//! the contour approximation algorithm (aliases of the CV_CHAIN_APPROX_* C constants)
+enum
+{
+ CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE,
+ CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE,
+ CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, //!< presumably the Teh-Chin '89 variants — names mirror the C API; confirm upstream
+ CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS
+};
+
+//! retrieves contours and the hierarchical information from black-n-white image.
+CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours,
+ OutputArray hierarchy, int mode,
+ int method, Point offset=Point());
+
+//! retrieves contours from black-n-white image.
+CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours,
+ int mode, int method, Point offset=Point());
+
+//! draws contours in the image
+CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours,
+ int contourIdx, const Scalar& color,
+ int thickness=1, int lineType=8,
+ InputArray hierarchy=noArray(),
+ int maxLevel=INT_MAX, Point offset=Point() );
+
+//! approximates contour or a curve using Douglas-Peucker algorithm
+CV_EXPORTS_W void approxPolyDP( InputArray curve,
+ OutputArray approxCurve,
+ double epsilon, bool closed );
+
+//! computes the contour perimeter (closed=true) or a curve length
+CV_EXPORTS_W double arcLength( InputArray curve, bool closed );
+//! computes the bounding rectangle for a contour
+CV_EXPORTS_W Rect boundingRect( InputArray points );
+//! computes the contour area
+CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false );
+//! computes the minimal rotated rectangle for a set of points
+CV_EXPORTS_W RotatedRect minAreaRect( InputArray points );
+//! computes the minimal enclosing circle for a set of points
+CV_EXPORTS_W void minEnclosingCircle( InputArray points,
+ CV_OUT Point2f& center, CV_OUT float& radius );
+//! matches two contours using one of the available algorithms
+CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2,
+ int method, double parameter );
+//! computes convex hull for a set of 2D points.
+CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull,
+ bool clockwise=false, bool returnPoints=true );
+//! computes the contour convexity defects
+CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects );
+
+//! returns true if the contour is convex. Does not support contours with self-intersection
+CV_EXPORTS_W bool isContourConvex( InputArray contour );
+
+//! finds intersection of two convex polygons
+CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2,
+ OutputArray _p12, bool handleNested=true );
+
+//! fits ellipse to the set of 2D points
+CV_EXPORTS_W RotatedRect fitEllipse( InputArray points );
+
+//! fits line to the set of 2D points using M-estimator algorithm
+CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType,
+ double param, double reps, double aeps );
+//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary
+CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist );
+
+
+//! Planar subdivision of a 2D point set. Method names (initDelaunay, calcVoronoi,
+//! getTriangleList, getVoronoiFacetList) indicate a Delaunay triangulation with its
+//! associated Voronoi diagram, stored as a quad-edge structure (see QuadEdge below).
+class CV_EXPORTS_W Subdiv2D
+{
+public:
+ //! point-location result codes (returned by locate())
+ enum
+ {
+ PTLOC_ERROR = -2,
+ PTLOC_OUTSIDE_RECT = -1,
+ PTLOC_INSIDE = 0,
+ PTLOC_VERTEX = 1,
+ PTLOC_ON_EDGE = 2
+ };
+
+ //! edge-navigation selectors for getEdge(); encoded as two 2-bit fields
+ enum
+ {
+ NEXT_AROUND_ORG = 0x00,
+ NEXT_AROUND_DST = 0x22,
+ PREV_AROUND_ORG = 0x11,
+ PREV_AROUND_DST = 0x33,
+ NEXT_AROUND_LEFT = 0x13,
+ NEXT_AROUND_RIGHT = 0x31,
+ PREV_AROUND_LEFT = 0x20,
+ PREV_AROUND_RIGHT = 0x02
+ };
+
+ //! construction / (re)initialization over a bounding rectangle
+ CV_WRAP Subdiv2D();
+ CV_WRAP Subdiv2D(Rect rect);
+ CV_WRAP void initDelaunay(Rect rect);
+
+ //! point insertion and point location
+ CV_WRAP int insert(Point2f pt);
+ CV_WRAP void insert(const vector<Point2f>& ptvec);
+ CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex);
+
+ //! queries over the finished subdivision
+ CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0);
+ CV_WRAP void getEdgeList(CV_OUT vector<Vec4f>& edgeList) const;
+ CV_WRAP void getTriangleList(CV_OUT vector<Vec6f>& triangleList) const;
+ CV_WRAP void getVoronoiFacetList(const vector<int>& idx, CV_OUT vector<vector<Point2f> >& facetList,
+ CV_OUT vector<Point2f>& facetCenters);
+
+ CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const;
+
+ //! quad-edge navigation primitives (edge ids are ints; see the selector enum above)
+ CV_WRAP int getEdge( int edge, int nextEdgeType ) const;
+ CV_WRAP int nextEdge(int edge) const;
+ CV_WRAP int rotateEdge(int edge, int rotate) const;
+ CV_WRAP int symEdge(int edge) const;
+ CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const;
+ CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const;
+
+protected:
+ //! internal pool management and topology editing
+ int newEdge();
+ void deleteEdge(int edge);
+ int newPoint(Point2f pt, bool isvirtual, int firstEdge=0);
+ void deletePoint(int vtx);
+ void setEdgePoints( int edge, int orgPt, int dstPt );
+ void splice( int edgeA, int edgeB );
+ int connectEdges( int edgeA, int edgeB );
+ void swapEdges( int edge );
+ int isRightOf(Point2f pt, int edge) const;
+ void calcVoronoi();
+ void clearVoronoi();
+ void checkSubdiv() const;
+
+ //! a subdivision vertex; "virtual" vertices appear in the Voronoi overlay (see isvirtual())
+ struct CV_EXPORTS Vertex
+ {
+ Vertex();
+ Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0);
+ bool isvirtual() const;
+ bool isfree() const;
+ int firstEdge;
+ int type;
+ Point2f pt;
+ };
+ //! quad-edge record: 4 directed edge slots with next-edge links and endpoint ids
+ struct CV_EXPORTS QuadEdge
+ {
+ QuadEdge();
+ QuadEdge(int edgeidx);
+ bool isfree() const;
+ int next[4];
+ int pt[4];
+ };
+
+ vector<Vertex> vtx; // vertex pool
+ vector<QuadEdge> qedges; // quad-edge pool
+ int freeQEdge; // presumably head of the free quad-edge list — confirm against upstream
+ int freePoint; // presumably head of the free vertex list — confirm against upstream
+ bool validGeometry; // NOTE(review): looks like a "Voronoi data up to date" flag (cleared by clearVoronoi) — confirm
+
+ int recentEdge; // NOTE(review): likely a cached starting edge for locate() — confirm
+ Point2f topLeft; // bounding-rect corners, presumably from the Rect passed to initDelaunay — confirm
+ Point2f bottomRight;
+};
+
+}
+
+#endif /* __cplusplus */
+
+#endif
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h
new file mode 100644
index 00000000..46d9f013
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/imgproc_c.h
@@ -0,0 +1,623 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__
+#define __OPENCV_IMGPROC_IMGPROC_C_H__
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/imgproc/types_c.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*********************** Background statistics accumulation *****************************/
+
+/* Adds image to accumulator */
+CVAPI(void) cvAcc( const CvArr* image, CvArr* sum,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Adds squared image to accumulator */
+CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Adds a product of two images to accumulator */
+CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha */
+CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha,
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/****************************************************************************************\
+* Image Processing *
+\****************************************************************************************/
+
+/* Copies source 2D array inside of the larger destination array and
+ makes a border of the specified type (IPL_BORDER_*) around the copied area. */
+CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset,
+ int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0)));
+
+/* Smoothes array (removes noise) */
+CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst,
+ int smoothtype CV_DEFAULT(CV_GAUSSIAN),
+ int size1 CV_DEFAULT(3),
+ int size2 CV_DEFAULT(0),
+ double sigma1 CV_DEFAULT(0),
+ double sigma2 CV_DEFAULT(0));
+
+/* Convolves the image with the kernel */
+CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel,
+ CvPoint anchor CV_DEFAULT(cvPoint(-1,-1)));
+
+/* Finds integral image: SUM(X,Y) = sum(x<X,y<Y)I(x,y) */
+CVAPI(void) cvIntegral( const CvArr* image, CvArr* sum,
+ CvArr* sqsum CV_DEFAULT(NULL),
+ CvArr* tilted_sum CV_DEFAULT(NULL));
+
+/*
+ Smoothes the input image with gaussian kernel and then down-samples it.
+ dst_width = floor(src_width/2)[+1],
+ dst_height = floor(src_height/2)[+1]
+*/
+CVAPI(void) cvPyrDown( const CvArr* src, CvArr* dst,
+ int filter CV_DEFAULT(CV_GAUSSIAN_5x5) );
+
+/*
+ Up-samples image and smoothes the result with gaussian kernel.
+ dst_width = src_width*2,
+ dst_height = src_height*2
+*/
+CVAPI(void) cvPyrUp( const CvArr* src, CvArr* dst,
+ int filter CV_DEFAULT(CV_GAUSSIAN_5x5) );
+
+/* Builds pyramid for an image */
+CVAPI(CvMat**) cvCreatePyramid( const CvArr* img, int extra_layers, double rate,
+ const CvSize* layer_sizes CV_DEFAULT(0),
+ CvArr* bufarr CV_DEFAULT(0),
+ int calc CV_DEFAULT(1),
+ int filter CV_DEFAULT(CV_GAUSSIAN_5x5) );
+
+/* Releases pyramid */
+CVAPI(void) cvReleasePyramid( CvMat*** pyramid, int extra_layers );
+
+
+/* Filters image using meanshift algorithm */
+CVAPI(void) cvPyrMeanShiftFiltering( const CvArr* src, CvArr* dst,
+ double sp, double sr, int max_level CV_DEFAULT(1),
+ CvTermCriteria termcrit CV_DEFAULT(cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1)));
+
+/* Segments image using seed "markers" */
+CVAPI(void) cvWatershed( const CvArr* image, CvArr* markers );
+
+/* Calculates an image derivative using generalized Sobel
+ (aperture_size = 1,3,5,7) or Scharr (aperture_size = -1) operator.
+ Scharr can be used only for the first dx or dy derivative */
+CVAPI(void) cvSobel( const CvArr* src, CvArr* dst,
+ int xorder, int yorder,
+ int aperture_size CV_DEFAULT(3));
+
+/* Calculates the image Laplacian: (d2/dx + d2/dy)I */
+CVAPI(void) cvLaplace( const CvArr* src, CvArr* dst,
+ int aperture_size CV_DEFAULT(3) );
+
+/* Converts input array pixels from one color space to another */
+CVAPI(void) cvCvtColor( const CvArr* src, CvArr* dst, int code );
+
+
+/* Resizes image (input array is resized to fit the destination array) */
+CVAPI(void) cvResize( const CvArr* src, CvArr* dst,
+ int interpolation CV_DEFAULT( CV_INTER_LINEAR ));
+
+/* Warps image with affine transform */
+CVAPI(void) cvWarpAffine( const CvArr* src, CvArr* dst, const CvMat* map_matrix,
+ int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS),
+ CvScalar fillval CV_DEFAULT(cvScalarAll(0)) );
+
+/* Computes affine transform matrix for mapping src[i] to dst[i] (i=0,1,2) */
+CVAPI(CvMat*) cvGetAffineTransform( const CvPoint2D32f * src,
+ const CvPoint2D32f * dst,
+ CvMat * map_matrix );
+
+/* Computes rotation_matrix matrix */
+CVAPI(CvMat*) cv2DRotationMatrix( CvPoint2D32f center, double angle,
+ double scale, CvMat* map_matrix );
+
+/* Warps image with perspective (projective) transform */
+CVAPI(void) cvWarpPerspective( const CvArr* src, CvArr* dst, const CvMat* map_matrix,
+ int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS),
+ CvScalar fillval CV_DEFAULT(cvScalarAll(0)) );
+
+/* Computes perspective transform matrix for mapping src[i] to dst[i] (i=0,1,2,3) */
+CVAPI(CvMat*) cvGetPerspectiveTransform( const CvPoint2D32f* src,
+ const CvPoint2D32f* dst,
+ CvMat* map_matrix );
+
+/* Performs generic geometric transformation using the specified coordinate maps */
+CVAPI(void) cvRemap( const CvArr* src, CvArr* dst,
+ const CvArr* mapx, const CvArr* mapy,
+ int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS),
+ CvScalar fillval CV_DEFAULT(cvScalarAll(0)) );
+
+/* Converts mapx & mapy from floating-point to integer formats for cvRemap */
+CVAPI(void) cvConvertMaps( const CvArr* mapx, const CvArr* mapy,
+ CvArr* mapxy, CvArr* mapalpha );
+
+/* Performs forward or inverse log-polar image transform */
+CVAPI(void) cvLogPolar( const CvArr* src, CvArr* dst,
+ CvPoint2D32f center, double M,
+ int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS));
+
+/* Performs forward or inverse linear-polar image transform */
+CVAPI(void) cvLinearPolar( const CvArr* src, CvArr* dst,
+ CvPoint2D32f center, double maxRadius,
+ int flags CV_DEFAULT(CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS));
+
+/* Transforms the input image to compensate lens distortion */
+CVAPI(void) cvUndistort2( const CvArr* src, CvArr* dst,
+ const CvMat* camera_matrix,
+ const CvMat* distortion_coeffs,
+ const CvMat* new_camera_matrix CV_DEFAULT(0) );
+
+/* Computes transformation map from intrinsic camera parameters
+ that can be used by cvRemap */
+CVAPI(void) cvInitUndistortMap( const CvMat* camera_matrix,
+ const CvMat* distortion_coeffs,
+ CvArr* mapx, CvArr* mapy );
+
+/* Computes undistortion+rectification map for a head of stereo camera */
+CVAPI(void) cvInitUndistortRectifyMap( const CvMat* camera_matrix,
+ const CvMat* dist_coeffs,
+ const CvMat *R, const CvMat* new_camera_matrix,
+ CvArr* mapx, CvArr* mapy );
+
+/* Computes the original (undistorted) feature coordinates
+ from the observed (distorted) coordinates */
+CVAPI(void) cvUndistortPoints( const CvMat* src, CvMat* dst,
+ const CvMat* camera_matrix,
+ const CvMat* dist_coeffs,
+ const CvMat* R CV_DEFAULT(0),
+ const CvMat* P CV_DEFAULT(0));
+
+/* creates structuring element used for morphological operations */
+CVAPI(IplConvKernel*) cvCreateStructuringElementEx(
+ int cols, int rows, int anchor_x, int anchor_y,
+ int shape, int* values CV_DEFAULT(NULL) );
+
+/* releases structuring element */
+CVAPI(void) cvReleaseStructuringElement( IplConvKernel** element );
+
+/* erodes input image (applies minimum filter) one or more times.
+ If element pointer is NULL, 3x3 rectangular element is used */
+CVAPI(void) cvErode( const CvArr* src, CvArr* dst,
+ IplConvKernel* element CV_DEFAULT(NULL),
+ int iterations CV_DEFAULT(1) );
+
+/* dilates input image (applies maximum filter) one or more times.
+ If element pointer is NULL, 3x3 rectangular element is used */
+CVAPI(void) cvDilate( const CvArr* src, CvArr* dst,
+ IplConvKernel* element CV_DEFAULT(NULL),
+ int iterations CV_DEFAULT(1) );
+
+/* Performs complex morphological transformation */
+CVAPI(void) cvMorphologyEx( const CvArr* src, CvArr* dst,
+ CvArr* temp, IplConvKernel* element,
+ int operation, int iterations CV_DEFAULT(1) );
+
+/* Calculates all spatial and central moments up to the 3rd order */
+CVAPI(void) cvMoments( const CvArr* arr, CvMoments* moments, int binary CV_DEFAULT(0));
+
+/* Retrieve particular spatial, central or normalized central moments */
+CVAPI(double) cvGetSpatialMoment( CvMoments* moments, int x_order, int y_order );
+CVAPI(double) cvGetCentralMoment( CvMoments* moments, int x_order, int y_order );
+CVAPI(double) cvGetNormalizedCentralMoment( CvMoments* moments,
+ int x_order, int y_order );
+
+/* Calculates 7 Hu's invariants from precalculated spatial and central moments */
+CVAPI(void) cvGetHuMoments( CvMoments* moments, CvHuMoments* hu_moments );
+
+/*********************************** data sampling **************************************/
+
+/* Fetches pixels that belong to the specified line segment and stores them to the buffer.
+ Returns the number of retrieved points. */
+CVAPI(int) cvSampleLine( const CvArr* image, CvPoint pt1, CvPoint pt2, void* buffer,
+ int connectivity CV_DEFAULT(8));
+
+/* Retrieves the rectangular image region with specified center from the input array.
+ dst(x,y) <- src(x + center.x - dst_width/2, y + center.y - dst_height/2).
+ Values of pixels with fractional coordinates are retrieved using bilinear interpolation*/
+CVAPI(void) cvGetRectSubPix( const CvArr* src, CvArr* dst, CvPoint2D32f center );
+
+
+/* Retrieves quadrangle from the input array.
+ matrixarr = ( a11 a12 | b1 ) dst(x,y) <- src(A[x y]' + b)
+ ( a21 a22 | b2 ) (bilinear interpolation is used to retrieve pixels
+ with fractional coordinates)
+*/
+CVAPI(void) cvGetQuadrangleSubPix( const CvArr* src, CvArr* dst,
+ const CvMat* map_matrix );
+
+/* Measures similarity between template and overlapped windows in the source image
+ and fills the resultant image with the measurements */
+CVAPI(void) cvMatchTemplate( const CvArr* image, const CvArr* templ,
+ CvArr* result, int method );
+
+/* Computes earth mover distance between
+ two weighted point sets (called signatures) */
+CVAPI(float) cvCalcEMD2( const CvArr* signature1,
+ const CvArr* signature2,
+ int distance_type,
+ CvDistanceFunction distance_func CV_DEFAULT(NULL),
+ const CvArr* cost_matrix CV_DEFAULT(NULL),
+ CvArr* flow CV_DEFAULT(NULL),
+ float* lower_bound CV_DEFAULT(NULL),
+ void* userdata CV_DEFAULT(NULL));
+
+/****************************************************************************************\
+* Contours retrieving *
+\****************************************************************************************/
+
+/* Retrieves outer and optionally inner boundaries of white (non-zero) connected
+ components in the black (zero) background */
+CVAPI(int) cvFindContours( CvArr* image, CvMemStorage* storage, CvSeq** first_contour,
+ int header_size CV_DEFAULT(sizeof(CvContour)),
+ int mode CV_DEFAULT(CV_RETR_LIST),
+ int method CV_DEFAULT(CV_CHAIN_APPROX_SIMPLE),
+ CvPoint offset CV_DEFAULT(cvPoint(0,0)));
+
+/* Initializes contour retrieving process.
+ Calls cvStartFindContours.
+ Calls cvFindNextContour until null pointer is returned
+ or some other condition becomes true.
+ Calls cvEndFindContours at the end. */
+CVAPI(CvContourScanner) cvStartFindContours( CvArr* image, CvMemStorage* storage,
+ int header_size CV_DEFAULT(sizeof(CvContour)),
+ int mode CV_DEFAULT(CV_RETR_LIST),
+ int method CV_DEFAULT(CV_CHAIN_APPROX_SIMPLE),
+ CvPoint offset CV_DEFAULT(cvPoint(0,0)));
+
+/* Retrieves next contour */
+CVAPI(CvSeq*) cvFindNextContour( CvContourScanner scanner );
+
+
+/* Substitutes the last retrieved contour with the new one
+ (if the substitutor is null, the last retrieved contour is removed from the tree) */
+CVAPI(void) cvSubstituteContour( CvContourScanner scanner, CvSeq* new_contour );
+
+
+/* Releases contour scanner and returns pointer to the first outer contour */
+CVAPI(CvSeq*) cvEndFindContours( CvContourScanner* scanner );
+
+/* Approximates a single Freeman chain or a tree of chains to polygonal curves */
+CVAPI(CvSeq*) cvApproxChains( CvSeq* src_seq, CvMemStorage* storage,
+ int method CV_DEFAULT(CV_CHAIN_APPROX_SIMPLE),
+ double parameter CV_DEFAULT(0),
+ int minimal_perimeter CV_DEFAULT(0),
+ int recursive CV_DEFAULT(0));
+
+/* Initializes Freeman chain reader.
+ The reader is used to iteratively get coordinates of all the chain points.
+ If the Freeman codes should be read as is, a simple sequence reader should be used */
+CVAPI(void) cvStartReadChainPoints( CvChain* chain, CvChainPtReader* reader );
+
+/* Retrieves the next chain point */
+CVAPI(CvPoint) cvReadChainPoint( CvChainPtReader* reader );
+
+
+/****************************************************************************************\
+* Contour Processing and Shape Analysis *
+\****************************************************************************************/
+
+/* Approximates a single polygonal curve (contour) or
+ a tree of polygonal curves (contours) */
+CVAPI(CvSeq*) cvApproxPoly( const void* src_seq,
+ int header_size, CvMemStorage* storage,
+ int method, double eps,
+ int recursive CV_DEFAULT(0));
+
+/* Calculates perimeter of a contour or length of a part of contour */
+CVAPI(double) cvArcLength( const void* curve,
+ CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ),
+ int is_closed CV_DEFAULT(-1));
+
+/* Convenience wrapper over cvArcLength: measures the whole sequence
+   (CV_WHOLE_SEQ) treated as a closed contour (is_closed = 1) */
+CV_INLINE double cvContourPerimeter( const void* contour )
+{
+ return cvArcLength( contour, CV_WHOLE_SEQ, 1 );
+}
+
+
+/* Calculates contour bounding rectangle (update=1) or
+ just retrieves pre-calculated rectangle (update=0) */
+CVAPI(CvRect) cvBoundingRect( CvArr* points, int update CV_DEFAULT(0) );
+
+/* Calculates area of a contour or contour segment */
+CVAPI(double) cvContourArea( const CvArr* contour,
+ CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ),
+ int oriented CV_DEFAULT(0));
+
+/* Finds minimum area rotated rectangle bounding a set of points */
+CVAPI(CvBox2D) cvMinAreaRect2( const CvArr* points,
+ CvMemStorage* storage CV_DEFAULT(NULL));
+
+/* Finds minimum enclosing circle for a set of points */
+CVAPI(int) cvMinEnclosingCircle( const CvArr* points,
+ CvPoint2D32f* center, float* radius );
+
+/* Compares two contours by matching their moments */
+CVAPI(double) cvMatchShapes( const void* object1, const void* object2,
+ int method, double parameter CV_DEFAULT(0));
+
+/* Calculates exact convex hull of 2d point set */
+CVAPI(CvSeq*) cvConvexHull2( const CvArr* input,
+ void* hull_storage CV_DEFAULT(NULL),
+ int orientation CV_DEFAULT(CV_CLOCKWISE),
+ int return_points CV_DEFAULT(0));
+
+/* Checks whether the contour is convex or not (returns 1 if convex, 0 if not) */
+CVAPI(int) cvCheckContourConvexity( const CvArr* contour );
+
+
+/* Finds convexity defects for the contour */
+CVAPI(CvSeq*) cvConvexityDefects( const CvArr* contour, const CvArr* convexhull,
+ CvMemStorage* storage CV_DEFAULT(NULL));
+
+/* Fits ellipse into a set of 2d points */
+CVAPI(CvBox2D) cvFitEllipse2( const CvArr* points );
+
+/* Finds minimum rectangle containing two given rectangles */
+CVAPI(CvRect) cvMaxRect( const CvRect* rect1, const CvRect* rect2 );
+
+/* Finds coordinates of the box vertices */
+CVAPI(void) cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] );
+
+/* Initializes sequence header for a matrix (column or row vector) of points -
+ a wrapper for cvMakeSeqHeaderForArray (it does not initialize bounding rectangle!!!) */
+CVAPI(CvSeq*) cvPointSeqFromMat( int seq_kind, const CvArr* mat,
+ CvContour* contour_header,
+ CvSeqBlock* block );
+
+/* Checks whether the point is inside polygon, outside, on an edge (at a vertex).
+ Returns positive, negative or zero value, correspondingly.
+ Optionally, measures a signed distance between
+ the point and the nearest polygon edge (measure_dist=1) */
+CVAPI(double) cvPointPolygonTest( const CvArr* contour,
+ CvPoint2D32f pt, int measure_dist );
+
+/****************************************************************************************\
+* Histogram functions *
+\****************************************************************************************/
+
+/* Creates new histogram */
+CVAPI(CvHistogram*) cvCreateHist( int dims, int* sizes, int type,
+ float** ranges CV_DEFAULT(NULL),
+ int uniform CV_DEFAULT(1));
+
+/* Assigns histogram bin ranges */
+CVAPI(void) cvSetHistBinRanges( CvHistogram* hist, float** ranges,
+ int uniform CV_DEFAULT(1));
+
+/* Creates histogram header for array */
+CVAPI(CvHistogram*) cvMakeHistHeaderForArray(
+ int dims, int* sizes, CvHistogram* hist,
+ float* data, float** ranges CV_DEFAULT(NULL),
+ int uniform CV_DEFAULT(1));
+
+/* Releases histogram */
+CVAPI(void) cvReleaseHist( CvHistogram** hist );
+
+/* Clears all the histogram bins */
+CVAPI(void) cvClearHist( CvHistogram* hist );
+
+/* Finds indices and values of minimum and maximum histogram bins */
+CVAPI(void) cvGetMinMaxHistValue( const CvHistogram* hist,
+ float* min_value, float* max_value,
+ int* min_idx CV_DEFAULT(NULL),
+ int* max_idx CV_DEFAULT(NULL));
+
+
+/* Normalizes histogram by dividing all bins by sum of the bins, multiplied by <factor>.
+ After that sum of histogram bins is equal to <factor> */
+CVAPI(void) cvNormalizeHist( CvHistogram* hist, double factor );
+
+
+/* Clear all histogram bins that are below the threshold */
+CVAPI(void) cvThreshHist( CvHistogram* hist, double threshold );
+
+
+/* Compares two histogram */
+CVAPI(double) cvCompareHist( const CvHistogram* hist1,
+ const CvHistogram* hist2,
+ int method);
+
+/* Copies one histogram to another. Destination histogram is created if
+ the destination pointer is NULL */
+CVAPI(void) cvCopyHist( const CvHistogram* src, CvHistogram** dst );
+
+
+/* Calculates Bayesian probabilistic histograms
+ (each of src and dst is an array of <number> histograms) */
+CVAPI(void) cvCalcBayesianProb( CvHistogram** src, int number,
+ CvHistogram** dst);
+
+/* Calculates array histogram */
+CVAPI(void) cvCalcArrHist( CvArr** arr, CvHistogram* hist,
+ int accumulate CV_DEFAULT(0),
+ const CvArr* mask CV_DEFAULT(NULL) );
+
+/* Convenience wrapper for IplImage planes: forwards to cvCalcArrHist
+   (see above for the accumulate/mask semantics) */
+CV_INLINE void cvCalcHist( IplImage** image, CvHistogram* hist,
+ int accumulate CV_DEFAULT(0),
+ const CvArr* mask CV_DEFAULT(NULL) )
+{
+ cvCalcArrHist( (CvArr**)image, hist, accumulate, mask );
+}
+
+/* Calculates back project */
+CVAPI(void) cvCalcArrBackProject( CvArr** image, CvArr* dst,
+ const CvHistogram* hist );
+#define cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist)
+
+
+/* Does some sort of template matching but compares histograms of
+ template and each window location */
+CVAPI(void) cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range,
+ CvHistogram* hist, int method,
+ double factor );
+#define cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \
+ cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor )
+
+
+/* calculates probabilistic density (divides one histogram by another) */
+CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2,
+ CvHistogram* dst_hist, double scale CV_DEFAULT(255) );
+
+/* equalizes histogram of 8-bit single-channel image */
+CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst );
+
+
+/* Applies distance transform to binary image */
+CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst,
+ int distance_type CV_DEFAULT(CV_DIST_L2),
+ int mask_size CV_DEFAULT(3),
+ const float* mask CV_DEFAULT(NULL),
+ CvArr* labels CV_DEFAULT(NULL),
+ int labelType CV_DEFAULT(CV_DIST_LABEL_CCOMP));
+
+
+/* Applies fixed-level threshold to grayscale image.
+ This is a basic operation applied before retrieving contours */
+CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst,
+ double threshold, double max_value,
+ int threshold_type );
+
+/* Applies adaptive threshold to grayscale image.
+ The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and
+ CV_ADAPTIVE_THRESH_GAUSSIAN_C are:
+ neighborhood size (3, 5, 7 etc.),
+ and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) */
+CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value,
+ int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C),
+ int threshold_type CV_DEFAULT(CV_THRESH_BINARY),
+ int block_size CV_DEFAULT(3),
+ double param1 CV_DEFAULT(5));
+
+/* Fills the connected component until the color difference gets large enough */
+CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point,
+ CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)),
+ CvScalar up_diff CV_DEFAULT(cvScalarAll(0)),
+ CvConnectedComp* comp CV_DEFAULT(NULL),
+ int flags CV_DEFAULT(4),
+ CvArr* mask CV_DEFAULT(NULL));
+
+/****************************************************************************************\
+* Feature detection *
+\****************************************************************************************/
+
+/* Runs canny edge detector */
+CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1,
+ double threshold2, int aperture_size CV_DEFAULT(3) );
+
+/* Calculates constraint image for corner detection
+ Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy.
+ Applying threshold to the result gives coordinates of corners */
+CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners,
+ int aperture_size CV_DEFAULT(3) );
+
+/* Calculates eigen values and vectors of 2x2
+ gradient covariation matrix at every image pixel */
+CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv,
+ int block_size, int aperture_size CV_DEFAULT(3) );
+
+/* Calculates minimal eigenvalue for 2x2 gradient covariation matrix at
+ every image pixel */
+CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval,
+ int block_size, int aperture_size CV_DEFAULT(3) );
+
+/* Harris corner detector:
+ Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel */
+CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_response,
+ int block_size, int aperture_size CV_DEFAULT(3),
+ double k CV_DEFAULT(0.04) );
+
+/* Adjust corner position using some sort of gradient search */
+CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners,
+ int count, CvSize win, CvSize zero_zone,
+ CvTermCriteria criteria );
+
+/* Finds a sparse set of points within the selected region
+ that seem to be easy to track */
+CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image,
+ CvArr* temp_image, CvPoint2D32f* corners,
+ int* corner_count, double quality_level,
+ double min_distance,
+ const CvArr* mask CV_DEFAULT(NULL),
+ int block_size CV_DEFAULT(3),
+ int use_harris CV_DEFAULT(0),
+ double k CV_DEFAULT(0.04) );
+
+/* Finds lines on binary image using one of several methods.
+ line_storage is either memory storage or 1 x <max number of lines> CvMat, its
+ number of columns is changed by the function.
+ method is one of CV_HOUGH_*;
+ rho, theta and threshold are used for each of those methods;
+ param1 ~ line length, param2 ~ line gap - for probabilistic,
+ param1 ~ srn, param2 ~ stn - for multi-scale */
+CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method,
+ double rho, double theta, int threshold,
+ double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0));
+
+/* Finds circles in the image */
+CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage,
+ int method, double dp, double min_dist,
+ double param1 CV_DEFAULT(100),
+ double param2 CV_DEFAULT(100),
+ int min_radius CV_DEFAULT(0),
+ int max_radius CV_DEFAULT(0));
+
+/* Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) */
+CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param,
+ double reps, double aeps, float* line );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h
new file mode 100644
index 00000000..4aba0a87
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/imgproc/types_c.h
@@ -0,0 +1,640 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_IMGPROC_TYPES_C_H__
+#define __OPENCV_IMGPROC_TYPES_C_H__
+
+#include "opencv2/core/core_c.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Connected component structure */
+typedef struct CvConnectedComp
+{
+ double area; /* area of the connected component */
+ CvScalar value; /* average color of the connected component */
+ CvRect rect; /* ROI of the component */
+ CvSeq* contour; /* optional component boundary
+ (the contour might have child contours corresponding to the holes)*/
+}
+CvConnectedComp;
+
+/* Image smooth methods */
+enum
+{
+ CV_BLUR_NO_SCALE =0,
+ CV_BLUR =1,
+ CV_GAUSSIAN =2,
+ CV_MEDIAN =3,
+ CV_BILATERAL =4
+};
+
+/* Filters used in pyramid decomposition */
+enum
+{
+ CV_GAUSSIAN_5x5 = 7
+};
+
+/* Special filters */
+enum
+{
+ CV_SCHARR =-1,
+ CV_MAX_SOBEL_KSIZE =7
+};
+
+/* Constants for color conversion */
+enum
+{
+ CV_BGR2BGRA =0,
+ CV_RGB2RGBA =CV_BGR2BGRA,
+
+ CV_BGRA2BGR =1,
+ CV_RGBA2RGB =CV_BGRA2BGR,
+
+ CV_BGR2RGBA =2,
+ CV_RGB2BGRA =CV_BGR2RGBA,
+
+ CV_RGBA2BGR =3,
+ CV_BGRA2RGB =CV_RGBA2BGR,
+
+ CV_BGR2RGB =4,
+ CV_RGB2BGR =CV_BGR2RGB,
+
+ CV_BGRA2RGBA =5,
+ CV_RGBA2BGRA =CV_BGRA2RGBA,
+
+ CV_BGR2GRAY =6,
+ CV_RGB2GRAY =7,
+ CV_GRAY2BGR =8,
+ CV_GRAY2RGB =CV_GRAY2BGR,
+ CV_GRAY2BGRA =9,
+ CV_GRAY2RGBA =CV_GRAY2BGRA,
+ CV_BGRA2GRAY =10,
+ CV_RGBA2GRAY =11,
+
+ CV_BGR2BGR565 =12,
+ CV_RGB2BGR565 =13,
+ CV_BGR5652BGR =14,
+ CV_BGR5652RGB =15,
+ CV_BGRA2BGR565 =16,
+ CV_RGBA2BGR565 =17,
+ CV_BGR5652BGRA =18,
+ CV_BGR5652RGBA =19,
+
+ CV_GRAY2BGR565 =20,
+ CV_BGR5652GRAY =21,
+
+ CV_BGR2BGR555 =22,
+ CV_RGB2BGR555 =23,
+ CV_BGR5552BGR =24,
+ CV_BGR5552RGB =25,
+ CV_BGRA2BGR555 =26,
+ CV_RGBA2BGR555 =27,
+ CV_BGR5552BGRA =28,
+ CV_BGR5552RGBA =29,
+
+ CV_GRAY2BGR555 =30,
+ CV_BGR5552GRAY =31,
+
+ CV_BGR2XYZ =32,
+ CV_RGB2XYZ =33,
+ CV_XYZ2BGR =34,
+ CV_XYZ2RGB =35,
+
+ CV_BGR2YCrCb =36,
+ CV_RGB2YCrCb =37,
+ CV_YCrCb2BGR =38,
+ CV_YCrCb2RGB =39,
+
+ CV_BGR2HSV =40,
+ CV_RGB2HSV =41,
+
+ CV_BGR2Lab =44,
+ CV_RGB2Lab =45,
+
+ CV_BayerBG2BGR =46,
+ CV_BayerGB2BGR =47,
+ CV_BayerRG2BGR =48,
+ CV_BayerGR2BGR =49,
+
+ CV_BayerBG2RGB =CV_BayerRG2BGR,
+ CV_BayerGB2RGB =CV_BayerGR2BGR,
+ CV_BayerRG2RGB =CV_BayerBG2BGR,
+ CV_BayerGR2RGB =CV_BayerGB2BGR,
+
+ CV_BGR2Luv =50,
+ CV_RGB2Luv =51,
+ CV_BGR2HLS =52,
+ CV_RGB2HLS =53,
+
+ CV_HSV2BGR =54,
+ CV_HSV2RGB =55,
+
+ CV_Lab2BGR =56,
+ CV_Lab2RGB =57,
+ CV_Luv2BGR =58,
+ CV_Luv2RGB =59,
+ CV_HLS2BGR =60,
+ CV_HLS2RGB =61,
+
+ CV_BayerBG2BGR_VNG =62,
+ CV_BayerGB2BGR_VNG =63,
+ CV_BayerRG2BGR_VNG =64,
+ CV_BayerGR2BGR_VNG =65,
+
+ CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG,
+ CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG,
+ CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG,
+ CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG,
+
+ CV_BGR2HSV_FULL = 66,
+ CV_RGB2HSV_FULL = 67,
+ CV_BGR2HLS_FULL = 68,
+ CV_RGB2HLS_FULL = 69,
+
+ CV_HSV2BGR_FULL = 70,
+ CV_HSV2RGB_FULL = 71,
+ CV_HLS2BGR_FULL = 72,
+ CV_HLS2RGB_FULL = 73,
+
+ CV_LBGR2Lab = 74,
+ CV_LRGB2Lab = 75,
+ CV_LBGR2Luv = 76,
+ CV_LRGB2Luv = 77,
+
+ CV_Lab2LBGR = 78,
+ CV_Lab2LRGB = 79,
+ CV_Luv2LBGR = 80,
+ CV_Luv2LRGB = 81,
+
+ CV_BGR2YUV = 82,
+ CV_RGB2YUV = 83,
+ CV_YUV2BGR = 84,
+ CV_YUV2RGB = 85,
+
+ CV_BayerBG2GRAY = 86,
+ CV_BayerGB2GRAY = 87,
+ CV_BayerRG2GRAY = 88,
+ CV_BayerGR2GRAY = 89,
+
+ //YUV 4:2:0 formats family
+ CV_YUV2RGB_NV12 = 90,
+ CV_YUV2BGR_NV12 = 91,
+ CV_YUV2RGB_NV21 = 92,
+ CV_YUV2BGR_NV21 = 93,
+ CV_YUV420sp2RGB = CV_YUV2RGB_NV21,
+ CV_YUV420sp2BGR = CV_YUV2BGR_NV21,
+
+ CV_YUV2RGBA_NV12 = 94,
+ CV_YUV2BGRA_NV12 = 95,
+ CV_YUV2RGBA_NV21 = 96,
+ CV_YUV2BGRA_NV21 = 97,
+ CV_YUV420sp2RGBA = CV_YUV2RGBA_NV21,
+ CV_YUV420sp2BGRA = CV_YUV2BGRA_NV21,
+
+ CV_YUV2RGB_YV12 = 98,
+ CV_YUV2BGR_YV12 = 99,
+ CV_YUV2RGB_IYUV = 100,
+ CV_YUV2BGR_IYUV = 101,
+ CV_YUV2RGB_I420 = CV_YUV2RGB_IYUV,
+ CV_YUV2BGR_I420 = CV_YUV2BGR_IYUV,
+ CV_YUV420p2RGB = CV_YUV2RGB_YV12,
+ CV_YUV420p2BGR = CV_YUV2BGR_YV12,
+
+ CV_YUV2RGBA_YV12 = 102,
+ CV_YUV2BGRA_YV12 = 103,
+ CV_YUV2RGBA_IYUV = 104,
+ CV_YUV2BGRA_IYUV = 105,
+ CV_YUV2RGBA_I420 = CV_YUV2RGBA_IYUV,
+ CV_YUV2BGRA_I420 = CV_YUV2BGRA_IYUV,
+ CV_YUV420p2RGBA = CV_YUV2RGBA_YV12,
+ CV_YUV420p2BGRA = CV_YUV2BGRA_YV12,
+
+ CV_YUV2GRAY_420 = 106,
+ CV_YUV2GRAY_NV21 = CV_YUV2GRAY_420,
+ CV_YUV2GRAY_NV12 = CV_YUV2GRAY_420,
+ CV_YUV2GRAY_YV12 = CV_YUV2GRAY_420,
+ CV_YUV2GRAY_IYUV = CV_YUV2GRAY_420,
+ CV_YUV2GRAY_I420 = CV_YUV2GRAY_420,
+ CV_YUV420sp2GRAY = CV_YUV2GRAY_420,
+ CV_YUV420p2GRAY = CV_YUV2GRAY_420,
+
+ //YUV 4:2:2 formats family
+ CV_YUV2RGB_UYVY = 107,
+ CV_YUV2BGR_UYVY = 108,
+ //CV_YUV2RGB_VYUY = 109,
+ //CV_YUV2BGR_VYUY = 110,
+ CV_YUV2RGB_Y422 = CV_YUV2RGB_UYVY,
+ CV_YUV2BGR_Y422 = CV_YUV2BGR_UYVY,
+ CV_YUV2RGB_UYNV = CV_YUV2RGB_UYVY,
+ CV_YUV2BGR_UYNV = CV_YUV2BGR_UYVY,
+
+ CV_YUV2RGBA_UYVY = 111,
+ CV_YUV2BGRA_UYVY = 112,
+ //CV_YUV2RGBA_VYUY = 113,
+ //CV_YUV2BGRA_VYUY = 114,
+ CV_YUV2RGBA_Y422 = CV_YUV2RGBA_UYVY,
+ CV_YUV2BGRA_Y422 = CV_YUV2BGRA_UYVY,
+ CV_YUV2RGBA_UYNV = CV_YUV2RGBA_UYVY,
+ CV_YUV2BGRA_UYNV = CV_YUV2BGRA_UYVY,
+
+ CV_YUV2RGB_YUY2 = 115,
+ CV_YUV2BGR_YUY2 = 116,
+ CV_YUV2RGB_YVYU = 117,
+ CV_YUV2BGR_YVYU = 118,
+ CV_YUV2RGB_YUYV = CV_YUV2RGB_YUY2,
+ CV_YUV2BGR_YUYV = CV_YUV2BGR_YUY2,
+ CV_YUV2RGB_YUNV = CV_YUV2RGB_YUY2,
+ CV_YUV2BGR_YUNV = CV_YUV2BGR_YUY2,
+
+ CV_YUV2RGBA_YUY2 = 119,
+ CV_YUV2BGRA_YUY2 = 120,
+ CV_YUV2RGBA_YVYU = 121,
+ CV_YUV2BGRA_YVYU = 122,
+ CV_YUV2RGBA_YUYV = CV_YUV2RGBA_YUY2,
+ CV_YUV2BGRA_YUYV = CV_YUV2BGRA_YUY2,
+ CV_YUV2RGBA_YUNV = CV_YUV2RGBA_YUY2,
+ CV_YUV2BGRA_YUNV = CV_YUV2BGRA_YUY2,
+
+ CV_YUV2GRAY_UYVY = 123,
+ CV_YUV2GRAY_YUY2 = 124,
+ //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY,
+ CV_YUV2GRAY_Y422 = CV_YUV2GRAY_UYVY,
+ CV_YUV2GRAY_UYNV = CV_YUV2GRAY_UYVY,
+ CV_YUV2GRAY_YVYU = CV_YUV2GRAY_YUY2,
+ CV_YUV2GRAY_YUYV = CV_YUV2GRAY_YUY2,
+ CV_YUV2GRAY_YUNV = CV_YUV2GRAY_YUY2,
+
+ // alpha premultiplication
+ CV_RGBA2mRGBA = 125,
+ CV_mRGBA2RGBA = 126,
+
+ CV_RGB2YUV_I420 = 127,
+ CV_BGR2YUV_I420 = 128,
+ CV_RGB2YUV_IYUV = CV_RGB2YUV_I420,
+ CV_BGR2YUV_IYUV = CV_BGR2YUV_I420,
+
+ CV_RGBA2YUV_I420 = 129,
+ CV_BGRA2YUV_I420 = 130,
+ CV_RGBA2YUV_IYUV = CV_RGBA2YUV_I420,
+ CV_BGRA2YUV_IYUV = CV_BGRA2YUV_I420,
+ CV_RGB2YUV_YV12 = 131,
+ CV_BGR2YUV_YV12 = 132,
+ CV_RGBA2YUV_YV12 = 133,
+ CV_BGRA2YUV_YV12 = 134,
+
+ CV_COLORCVT_MAX = 135
+};
+
+
+/* Sub-pixel interpolation methods */
+enum
+{
+ CV_INTER_NN =0,
+ CV_INTER_LINEAR =1,
+ CV_INTER_CUBIC =2,
+ CV_INTER_AREA =3,
+ CV_INTER_LANCZOS4 =4
+};
+
+/* ... and other image warping flags */
+enum
+{
+ CV_WARP_FILL_OUTLIERS =8,
+ CV_WARP_INVERSE_MAP =16
+};
+
+/* Shapes of a structuring element for morphological operations */
+enum
+{
+ CV_SHAPE_RECT =0,
+ CV_SHAPE_CROSS =1,
+ CV_SHAPE_ELLIPSE =2,
+ CV_SHAPE_CUSTOM =100
+};
+
+/* Morphological operations */
+enum
+{
+ CV_MOP_ERODE =0,
+ CV_MOP_DILATE =1,
+ CV_MOP_OPEN =2,
+ CV_MOP_CLOSE =3,
+ CV_MOP_GRADIENT =4,
+ CV_MOP_TOPHAT =5,
+ CV_MOP_BLACKHAT =6
+};
+
+/* Spatial and central moments */
+typedef struct CvMoments
+{
+ double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */
+ double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */
+ double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */
+}
+CvMoments;
+
+/* Hu invariants */
+typedef struct CvHuMoments
+{
+ double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */
+}
+CvHuMoments;
+
+/* Template matching methods */
+enum
+{
+ CV_TM_SQDIFF =0,
+ CV_TM_SQDIFF_NORMED =1,
+ CV_TM_CCORR =2,
+ CV_TM_CCORR_NORMED =3,
+ CV_TM_CCOEFF =4,
+ CV_TM_CCOEFF_NORMED =5
+};
+
+typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param );
+
+/* Contour retrieval modes */
+enum
+{
+ CV_RETR_EXTERNAL=0,
+ CV_RETR_LIST=1,
+ CV_RETR_CCOMP=2,
+ CV_RETR_TREE=3,
+ CV_RETR_FLOODFILL=4
+};
+
+/* Contour approximation methods */
+enum
+{
+ CV_CHAIN_CODE=0,
+ CV_CHAIN_APPROX_NONE=1,
+ CV_CHAIN_APPROX_SIMPLE=2,
+ CV_CHAIN_APPROX_TC89_L1=3,
+ CV_CHAIN_APPROX_TC89_KCOS=4,
+ CV_LINK_RUNS=5
+};
+
+/*
+Internal structure that is used for sequental retrieving contours from the image.
+It supports both hierarchical and plane variants of Suzuki algorithm.
+*/
+typedef struct _CvContourScanner* CvContourScanner;
+
+/* Freeman chain reader state */
+typedef struct CvChainPtReader
+{
+ CV_SEQ_READER_FIELDS()
+ char code;
+ CvPoint pt;
+ schar deltas[8][2];
+}
+CvChainPtReader;
+
+/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */
+#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \
+ ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \
+ (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \
+ (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \
+ (deltas)[6] = (step), (deltas)[7] = (step) + (nch))
+
+
+/****************************************************************************************\
+* Planar subdivisions *
+\****************************************************************************************/
+
+typedef size_t CvSubdiv2DEdge;
+
+#define CV_QUADEDGE2D_FIELDS() \
+ int flags; \
+ struct CvSubdiv2DPoint* pt[4]; \
+ CvSubdiv2DEdge next[4];
+
+#define CV_SUBDIV2D_POINT_FIELDS()\
+ int flags; \
+ CvSubdiv2DEdge first; \
+ CvPoint2D32f pt; \
+ int id;
+
+#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
+
+typedef struct CvQuadEdge2D
+{
+ CV_QUADEDGE2D_FIELDS()
+}
+CvQuadEdge2D;
+
+typedef struct CvSubdiv2DPoint
+{
+ CV_SUBDIV2D_POINT_FIELDS()
+}
+CvSubdiv2DPoint;
+
+#define CV_SUBDIV2D_FIELDS() \
+ CV_GRAPH_FIELDS() \
+ int quad_edges; \
+ int is_geometry_valid; \
+ CvSubdiv2DEdge recent_edge; \
+ CvPoint2D32f topleft; \
+ CvPoint2D32f bottomright;
+
+typedef struct CvSubdiv2D
+{
+ CV_SUBDIV2D_FIELDS()
+}
+CvSubdiv2D;
+
+
+typedef enum CvSubdiv2DPointLocation
+{
+ CV_PTLOC_ERROR = -2,
+ CV_PTLOC_OUTSIDE_RECT = -1,
+ CV_PTLOC_INSIDE = 0,
+ CV_PTLOC_VERTEX = 1,
+ CV_PTLOC_ON_EDGE = 2
+}
+CvSubdiv2DPointLocation;
+
+typedef enum CvNextEdgeType
+{
+ CV_NEXT_AROUND_ORG = 0x00,
+ CV_NEXT_AROUND_DST = 0x22,
+ CV_PREV_AROUND_ORG = 0x11,
+ CV_PREV_AROUND_DST = 0x33,
+ CV_NEXT_AROUND_LEFT = 0x13,
+ CV_NEXT_AROUND_RIGHT = 0x31,
+ CV_PREV_AROUND_LEFT = 0x20,
+ CV_PREV_AROUND_RIGHT = 0x02
+}
+CvNextEdgeType;
+
+/* get the next edge with the same origin point (counterwise) */
+#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3])
+
+
+/* Contour approximation algorithms */
+enum
+{
+ CV_POLY_APPROX_DP = 0
+};
+
+/* Shape matching methods */
+enum
+{
+ CV_CONTOURS_MATCH_I1 =1,
+ CV_CONTOURS_MATCH_I2 =2,
+ CV_CONTOURS_MATCH_I3 =3
+};
+
+/* Shape orientation */
+enum
+{
+ CV_CLOCKWISE =1,
+ CV_COUNTER_CLOCKWISE =2
+};
+
+
+/* Convexity defect */
+typedef struct CvConvexityDefect
+{
+ CvPoint* start; /* point of the contour where the defect begins */
+ CvPoint* end; /* point of the contour where the defect ends */
+ CvPoint* depth_point; /* the farthest from the convex hull point within the defect */
+ float depth; /* distance between the farthest point and the convex hull */
+} CvConvexityDefect;
+
+
+/* Histogram comparison methods */
+enum
+{
+ CV_COMP_CORREL =0,
+ CV_COMP_CHISQR =1,
+ CV_COMP_INTERSECT =2,
+ CV_COMP_BHATTACHARYYA =3,
+ CV_COMP_HELLINGER =CV_COMP_BHATTACHARYYA
+};
+
+/* Mask size for distance transform */
+enum
+{
+ CV_DIST_MASK_3 =3,
+ CV_DIST_MASK_5 =5,
+ CV_DIST_MASK_PRECISE =0
+};
+
+/* Content of output label array: connected components or pixels */
+enum
+{
+ CV_DIST_LABEL_CCOMP = 0,
+ CV_DIST_LABEL_PIXEL = 1
+};
+
+/* Distance types for Distance Transform and M-estimators */
+enum
+{
+ CV_DIST_USER =-1, /* User defined distance */
+ CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */
+ CV_DIST_L2 =2, /* the simple euclidean distance */
+ CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */
+ CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */
+ CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */
+ CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */
+ CV_DIST_HUBER =7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */
+};
+
+
+/* Threshold types */
+enum
+{
+ CV_THRESH_BINARY =0, /* value = value > threshold ? max_value : 0 */
+ CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */
+ CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */
+ CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */
+ CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 0 : value */
+ CV_THRESH_MASK =7,
+ CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value;
+ combine the flag with one of the above CV_THRESH_* values */
+};
+
+/* Adaptive threshold methods */
+enum
+{
+ CV_ADAPTIVE_THRESH_MEAN_C =0,
+ CV_ADAPTIVE_THRESH_GAUSSIAN_C =1
+};
+
+/* FloodFill flags */
+enum
+{
+ CV_FLOODFILL_FIXED_RANGE =(1 << 16),
+ CV_FLOODFILL_MASK_ONLY =(1 << 17)
+};
+
+
+/* Canny edge detector flags */
+enum
+{
+ CV_CANNY_L2_GRADIENT =(1 << 31)
+};
+
+/* Variants of a Hough transform */
+enum
+{
+ CV_HOUGH_STANDARD =0,
+ CV_HOUGH_PROBABILISTIC =1,
+ CV_HOUGH_MULTI_SCALE =2,
+ CV_HOUGH_GRADIENT =3
+};
+
+
+/* Fast search data structures */
+struct CvFeatureTree;
+struct CvLSH;
+struct CvLSHOperations;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp
new file mode 100644
index 00000000..496b8be2
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/blobtrack.hpp
@@ -0,0 +1,948 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+
+#ifndef __OPENCV_VIDEOSURVEILLANCE_H__
+#define __OPENCV_VIDEOSURVEILLANCE_H__
+
+/* Turn off the functionality until cvaux/src/Makefile.am gets updated: */
+//#if _MSC_VER >= 1200
+
+#include "opencv2/core/core_c.h"
+#include <stdio.h>
+
+#if (defined _MSC_VER && _MSC_VER >= 1200) || defined __BORLANDC__
+#define cv_stricmp stricmp
+#define cv_strnicmp strnicmp
+#if defined WINCE
+#define strdup _strdup
+#define stricmp _stricmp
+#endif
+#elif defined __GNUC__ || defined __sun
+#define cv_stricmp strcasecmp
+#define cv_strnicmp strncasecmp
+#else
+#error Do not know how to make case-insensitive string comparison on this platform
+#endif
+
+//struct DefParam;
+struct CvDefParam
+{
+ struct CvDefParam* next;
+ char* pName;
+ char* pComment;
+ double* pDouble;
+ double Double;
+ float* pFloat;
+ float Float;
+ int* pInt;
+ int Int;
+ char** pStr;
+ char* Str;
+};
+
+class CV_EXPORTS CvVSModule
+{
+private: /* Internal data: */
+ CvDefParam* m_pParamList;
+ char* m_pModuleTypeName;
+ char* m_pModuleName;
+ char* m_pNickName;
+protected:
+ int m_Wnd;
+public: /* Constructor and destructor: */
+ CvVSModule();
+ virtual ~CvVSModule();
+private: /* Internal functions: */
+ void FreeParam(CvDefParam** pp);
+ CvDefParam* NewParam(const char* name);
+ CvDefParam* GetParamPtr(int index);
+ CvDefParam* GetParamPtr(const char* name);
+protected: /* INTERNAL INTERFACE */
+ int IsParam(const char* name);
+ void AddParam(const char* name, double* pAddr);
+ void AddParam(const char* name, float* pAddr);
+ void AddParam(const char* name, int* pAddr);
+ void AddParam(const char* name, const char** pAddr);
+ void AddParam(const char* name);
+ void CommentParam(const char* name, const char* pComment);
+ void SetTypeName(const char* name);
+ void SetModuleName(const char* name);
+ void DelParam(const char* name);
+
+public: /* EXTERNAL INTERFACE */
+ const char* GetParamName(int index);
+ const char* GetParamComment(const char* name);
+ double GetParam(const char* name);
+ const char* GetParamStr(const char* name);
+ void SetParam(const char* name, double val);
+ void SetParamStr(const char* name, const char* str);
+ void TransferParamsFromChild(CvVSModule* pM, const char* prefix = NULL);
+ void TransferParamsToChild(CvVSModule* pM, char* prefix = NULL);
+ virtual void ParamUpdate();
+ const char* GetTypeName();
+ int IsModuleTypeName(const char* name);
+ char* GetModuleName();
+ int IsModuleName(const char* name);
+ void SetNickName(const char* pStr);
+ const char* GetNickName();
+ virtual void SaveState(CvFileStorage*);
+ virtual void LoadState(CvFileStorage*, CvFileNode*);
+
+ virtual void Release() = 0;
+};/* CvVMModule */
+
+CV_EXPORTS void cvWriteStruct(CvFileStorage* fs, const char* name, void* addr, const char* desc, int num=1);
+CV_EXPORTS void cvReadStructByName(CvFileStorage* fs, CvFileNode* node, const char* name, void* addr, const char* desc);
+
+/* FOREGROUND DETECTOR INTERFACE */
+class CV_EXPORTS CvFGDetector : public CvVSModule
+{
+public:
+ CvFGDetector();
+ virtual IplImage* GetMask() = 0;
+ /* Process current image: */
+ virtual void Process(IplImage* pImg) = 0;
+ /* Release foreground detector: */
+ virtual void Release() = 0;
+};
+
+CV_EXPORTS void cvReleaseFGDetector(CvFGDetector** ppT );
+CV_EXPORTS CvFGDetector* cvCreateFGDetectorBase(int type, void *param);
+
+
+/* BLOB STRUCTURE*/
+struct CvBlob
+{
+ float x,y; /* blob position */
+ float w,h; /* blob sizes */
+ int ID; /* blob ID */
+};
+
+inline CvBlob cvBlob(float x,float y, float w, float h)
+{
+ CvBlob B = {x,y,w,h,0};
+ return B;
+}
+#define CV_BLOB_MINW 5
+#define CV_BLOB_MINH 5
+#define CV_BLOB_ID(pB) (((CvBlob*)(pB))->ID)
+#define CV_BLOB_CENTER(pB) cvPoint2D32f(((CvBlob*)(pB))->x,((CvBlob*)(pB))->y)
+#define CV_BLOB_X(pB) (((CvBlob*)(pB))->x)
+#define CV_BLOB_Y(pB) (((CvBlob*)(pB))->y)
+#define CV_BLOB_WX(pB) (((CvBlob*)(pB))->w)
+#define CV_BLOB_WY(pB) (((CvBlob*)(pB))->h)
+#define CV_BLOB_RX(pB) (0.5f*CV_BLOB_WX(pB))
+#define CV_BLOB_RY(pB) (0.5f*CV_BLOB_WY(pB))
+#define CV_BLOB_RECT(pB) cvRect(cvRound(((CvBlob*)(pB))->x-CV_BLOB_RX(pB)),cvRound(((CvBlob*)(pB))->y-CV_BLOB_RY(pB)),cvRound(CV_BLOB_WX(pB)),cvRound(CV_BLOB_WY(pB)))
+/* END BLOB STRUCTURE*/
+
+
+/* simple BLOBLIST */
+class CV_EXPORTS CvBlobSeq
+{
+public:
+ CvBlobSeq(int BlobSize = sizeof(CvBlob))
+ {
+ m_pMem = cvCreateMemStorage();
+ m_pSeq = cvCreateSeq(0,sizeof(CvSeq),BlobSize,m_pMem);
+ strcpy(m_pElemFormat,"ffffi");
+ }
+ virtual ~CvBlobSeq()
+ {
+ cvReleaseMemStorage(&m_pMem);
+ };
+ virtual CvBlob* GetBlob(int BlobIndex)
+ {
+ return (CvBlob*)cvGetSeqElem(m_pSeq,BlobIndex);
+ };
+ virtual CvBlob* GetBlobByID(int BlobID)
+ {
+ int i;
+ for(i=0; i<m_pSeq->total; ++i)
+ if(BlobID == CV_BLOB_ID(GetBlob(i)))
+ return GetBlob(i);
+ return NULL;
+ };
+ virtual void DelBlob(int BlobIndex)
+ {
+ cvSeqRemove(m_pSeq,BlobIndex);
+ };
+ virtual void DelBlobByID(int BlobID)
+ {
+ int i;
+ for(i=0; i<m_pSeq->total; ++i)
+ {
+ if(BlobID == CV_BLOB_ID(GetBlob(i)))
+ {
+ DelBlob(i);
+ return;
+ }
+ }
+ };
+ virtual void Clear()
+ {
+ cvClearSeq(m_pSeq);
+ };
+ virtual void AddBlob(CvBlob* pB)
+ {
+ cvSeqPush(m_pSeq,pB);
+ };
+ virtual int GetBlobNum()
+ {
+ return m_pSeq->total;
+ };
+ virtual void Write(CvFileStorage* fs, const char* name)
+ {
+ const char* attr[] = {"dt",m_pElemFormat,NULL};
+ if(fs)
+ {
+ cvWrite(fs,name,m_pSeq,cvAttrList(attr,NULL));
+ }
+ }
+ virtual void Load(CvFileStorage* fs, CvFileNode* node)
+ {
+ if(fs==NULL) return;
+ CvSeq* pSeq = (CvSeq*)cvRead(fs, node);
+ if(pSeq)
+ {
+ int i;
+ cvClearSeq(m_pSeq);
+ for(i=0;i<pSeq->total;++i)
+ {
+ void* pB = cvGetSeqElem( pSeq, i );
+ cvSeqPush( m_pSeq, pB );
+ }
+ }
+ }
+ void AddFormat(const char* str){strcat(m_pElemFormat,str);}
+protected:
+ CvMemStorage* m_pMem;
+ CvSeq* m_pSeq;
+ char m_pElemFormat[1024];
+};
+/* simple BLOBLIST */
+
+
+/* simple TRACKLIST */
+struct CvBlobTrack
+{
+ int TrackID;
+ int StartFrame;
+ CvBlobSeq* pBlobSeq;
+};
+
+class CV_EXPORTS CvBlobTrackSeq
+{
+public:
+ CvBlobTrackSeq(int TrackSize = sizeof(CvBlobTrack));
+ virtual ~CvBlobTrackSeq();
+ virtual CvBlobTrack* GetBlobTrack(int TrackIndex);
+ virtual CvBlobTrack* GetBlobTrackByID(int TrackID);
+ virtual void DelBlobTrack(int TrackIndex);
+ virtual void DelBlobTrackByID(int TrackID);
+ virtual void Clear();
+ virtual void AddBlobTrack(int TrackID, int StartFrame = 0);
+ virtual int GetBlobTrackNum();
+protected:
+ CvMemStorage* m_pMem;
+ CvSeq* m_pSeq;
+};
+
+/* simple TRACKLIST */
+
+
+/* BLOB DETECTOR INTERFACE */
+class CV_EXPORTS CvBlobDetector: public CvVSModule
+{
+public:
+ CvBlobDetector(){SetTypeName("BlobDetector");};
+ /* Try to detect new blob entrance based on foreground mask. */
+ /* pFGMask - image of foreground mask */
+ /* pNewBlob - pointer to CvBlob structure which will be filled if new blob entrance detected */
+ /* pOldBlobList - pointer to blob list which already exist on image */
+ virtual int DetectNewBlob(IplImage* pImg, IplImage* pImgFG, CvBlobSeq* pNewBlobList, CvBlobSeq* pOldBlobList) = 0;
+ /* release blob detector */
+ virtual void Release()=0;
+};
+
+/* Release any blob detector: */
+CV_EXPORTS void cvReleaseBlobDetector(CvBlobDetector** ppBD);
+
+/* Declarations of constructors of implemented modules: */
+CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorSimple();
+CV_EXPORTS CvBlobDetector* cvCreateBlobDetectorCC();
+
+struct CV_EXPORTS CvDetectedBlob : public CvBlob
+{
+ float response;
+};
+
+CV_INLINE CvDetectedBlob cvDetectedBlob( float x, float y, float w, float h, int ID = 0, float response = 0.0F )
+{
+ CvDetectedBlob b;
+ b.x = x; b.y = y; b.w = w; b.h = h; b.ID = ID; b.response = response;
+ return b;
+}
+
+
+class CV_EXPORTS CvObjectDetector
+{
+public:
+ CvObjectDetector( const char* /*detector_file_name*/ = 0 );
+ ~CvObjectDetector();
+
+ /*
+ * Release the current detector and load new detector from file
+ * (if detector_file_name is not 0)
+ * Return true on success:
+ */
+ bool Load( const char* /*detector_file_name*/ = 0 );
+
+ /* Return min detector window size: */
+ CvSize GetMinWindowSize() const;
+
+ /* Return max border: */
+ int GetMaxBorderSize() const;
+
+ /*
+ * Detect the object on the image and push the detected
+ * blobs into <detected_blob_seq> which must be the sequence of <CvDetectedBlob>s
+ */
+ void Detect( const CvArr* /*img*/, /* out */ CvBlobSeq* /*detected_blob_seq*/ = 0 );
+
+protected:
+ class CvObjectDetectorImpl* impl;
+};
+
+
+CV_INLINE CvRect cvRectIntersection( const CvRect r1, const CvRect r2 )
+{
+ CvRect r = cvRect( MAX(r1.x, r2.x), MAX(r1.y, r2.y), 0, 0 );
+
+ r.width = MIN(r1.x + r1.width, r2.x + r2.width) - r.x;
+ r.height = MIN(r1.y + r1.height, r2.y + r2.height) - r.y;
+
+ return r;
+}
+
+
+/*
+ * CvImageDrawer
+ *
+ * Draw on an image the specified ROIs from the source image and
+ * given blobs as ellipses or rectangles:
+ */
+
+struct CvDrawShape
+{
+ enum {RECT, ELLIPSE} shape;
+ CvScalar color;
+};
+
+/*extern const CvDrawShape icv_shape[] =
+{
+ { CvDrawShape::ELLIPSE, CV_RGB(255,0,0) },
+ { CvDrawShape::ELLIPSE, CV_RGB(0,255,0) },
+ { CvDrawShape::ELLIPSE, CV_RGB(0,0,255) },
+ { CvDrawShape::ELLIPSE, CV_RGB(255,255,0) },
+ { CvDrawShape::ELLIPSE, CV_RGB(0,255,255) },
+ { CvDrawShape::ELLIPSE, CV_RGB(255,0,255) }
+};*/
+
+class CV_EXPORTS CvImageDrawer
+{
+public:
+ CvImageDrawer() : m_image(0) {}
+ ~CvImageDrawer() { cvReleaseImage( &m_image ); }
+ void SetShapes( const CvDrawShape* shapes, int num );
+ /* <blob_seq> must be the sequence of <CvDetectedBlob>s */
+ IplImage* Draw( const CvArr* src, CvBlobSeq* blob_seq = 0, const CvSeq* roi_seq = 0 );
+ IplImage* GetImage() { return m_image; }
+protected:
+ //static const int MAX_SHAPES = sizeof(icv_shape) / sizeof(icv_shape[0]);;
+
+ IplImage* m_image;
+ CvDrawShape m_shape[16];
+};
+
+
+
+/* Trajectory generation module: */
+class CV_EXPORTS CvBlobTrackGen: public CvVSModule
+{
+public:
+ CvBlobTrackGen(){SetTypeName("BlobTrackGen");};
+ virtual void SetFileName(char* pFileName) = 0;
+ virtual void AddBlob(CvBlob* pBlob) = 0;
+ virtual void Process(IplImage* pImg = NULL, IplImage* pFG = NULL) = 0;
+ virtual void Release() = 0;
+};
+
+inline void cvReleaseBlobTrackGen(CvBlobTrackGen** pBTGen)
+{
+ if(*pBTGen)(*pBTGen)->Release();
+ *pBTGen = 0;
+}
+
+/* Declarations of constructors of implemented modules: */
+CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGen1();
+CV_EXPORTS CvBlobTrackGen* cvCreateModuleBlobTrackGenYML();
+
+
+
+/* BLOB TRACKER INTERFACE */
+class CV_EXPORTS CvBlobTracker: public CvVSModule
+{
+public:
+ CvBlobTracker();
+
+ /* Add new blob to track it and assign to this blob personal ID */
+ /* pBlob - pointer to structure with blob parameters (ID is ignored)*/
+ /* pImg - current image */
+ /* pImgFG - current foreground mask */
+ /* Return pointer to new added blob: */
+ virtual CvBlob* AddBlob(CvBlob* pBlob, IplImage* pImg, IplImage* pImgFG = NULL ) = 0;
+
+ /* Return number of currently tracked blobs: */
+ virtual int GetBlobNum() = 0;
+
+ /* Return pointer to specified by index blob: */
+ virtual CvBlob* GetBlob(int BlobIndex) = 0;
+
+ /* Delete blob by its index: */
+ virtual void DelBlob(int BlobIndex) = 0;
+
+ /* Process current image and track all existed blobs: */
+ virtual void Process(IplImage* pImg, IplImage* pImgFG = NULL) = 0;
+
+ /* Release blob tracker: */
+ virtual void Release() = 0;
+
+
+ /* Process one blob (for multi hypothesis tracing): */
+ virtual void ProcessBlob(int BlobIndex, CvBlob* pBlob, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL);
+
+ /* Get confidence/wieght/probability (0-1) for blob: */
+ virtual double GetConfidence(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL);
+
+ virtual double GetConfidenceList(CvBlobSeq* pBlobList, IplImage* pImg, IplImage* pImgFG = NULL);
+
+ virtual void UpdateBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL);
+
+ /* Update all blob models: */
+ virtual void Update(IplImage* pImg, IplImage* pImgFG = NULL);
+
+ /* Return pointer to blob by its unique ID: */
+ virtual int GetBlobIndexByID(int BlobID);
+
+ /* Return pointer to blob by its unique ID: */
+ virtual CvBlob* GetBlobByID(int BlobID);
+
+ /* Delete blob by its ID: */
+ virtual void DelBlobByID(int BlobID);
+
+ /* Set new parameters for specified (by index) blob: */
+ virtual void SetBlob(int /*BlobIndex*/, CvBlob* /*pBlob*/);
+
+ /* Set new parameters for specified (by ID) blob: */
+ virtual void SetBlobByID(int BlobID, CvBlob* pBlob);
+
+ /* =============== MULTI HYPOTHESIS INTERFACE ================== */
+
+ /* Return number of position hyposetis of currently tracked blob: */
+ virtual int GetBlobHypNum(int /*BlobIdx*/);
+
+ /* Return pointer to specified blob hypothesis by index blob: */
+ virtual CvBlob* GetBlobHyp(int BlobIndex, int /*hypothesis*/);
+
+ /* Set new parameters for specified (by index) blob hyp
+ * (can be called several times for each hyp ):
+ */
+ virtual void SetBlobHyp(int /*BlobIndex*/, CvBlob* /*pBlob*/);
+};
+
+CV_EXPORTS void cvReleaseBlobTracker(CvBlobTracker**ppT );
+/* BLOB TRACKER INTERFACE */
+
+/*BLOB TRACKER ONE INTERFACE */
+class CV_EXPORTS CvBlobTrackerOne : public CvVSModule
+{
+public:
+ virtual void Init(CvBlob* pBlobInit, IplImage* pImg, IplImage* pImgFG = NULL) = 0;
+ virtual CvBlob* Process(CvBlob* pBlobPrev, IplImage* pImg, IplImage* pImgFG = NULL) = 0;
+ virtual void Release() = 0;
+
+ /* Non-required methods: */
+ virtual void SkipProcess(CvBlob* /*pBlobPrev*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){};
+ virtual void Update(CvBlob* /*pBlob*/, IplImage* /*pImg*/, IplImage* /*pImgFG*/ = NULL){};
+ virtual void SetCollision(int /*CollisionFlag*/){}; /* call in case of blob collision situation*/
+ virtual double GetConfidence(CvBlob* /*pBlob*/, IplImage* /*pImg*/,
+ IplImage* /*pImgFG*/ = NULL, IplImage* /*pImgUnusedReg*/ = NULL)
+ {
+ return 1;
+ };
+};
+inline void cvReleaseBlobTrackerOne(CvBlobTrackerOne **ppT )
+{
+ ppT[0]->Release();
+ ppT[0] = 0;
+}
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerList(CvBlobTrackerOne* (*create)());
+/*BLOB TRACKER ONE INTERFACE */
+
+/* Declarations of constructors of implemented modules: */
+
+/* Some declarations for specific MeanShift tracker: */
+#define PROFILE_EPANECHNIKOV 0
+#define PROFILE_DOG 1
+struct CvBlobTrackerParamMS
+{
+ int noOfSigBits;
+ int appearance_profile;
+ int meanshift_profile;
+ float sigma;
+};
+
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1(CvBlobTrackerParamMS* param);
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS2(CvBlobTrackerParamMS* param);
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS1ByList();
+
+/* Some declarations for specific Likelihood tracker: */
+struct CvBlobTrackerParamLH
+{
+ int HistType; /* see Prob.h */
+ int ScaleAfter;
+};
+
+/* Without scale optimization: */
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHR(CvBlobTrackerParamLH* /*param*/ = NULL);
+
+/* With scale optimization: */
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerLHRS(CvBlobTrackerParamLH* /*param*/ = NULL);
+
+/* Simple blob tracker based on connected component tracking: */
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCC();
+
+/* Connected component tracking and mean-shift particle filter collision-resolver: */
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerCCMSPF();
+
+/* Blob tracker that integrates meanshift and connected components: */
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFG();
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSFGS();
+
+/* Meanshift without connected-components */
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMS();
+
+/* Particle filtering via Bhattacharya coefficient, which */
+/* is roughly the dot-product of two probability densities. */
+/* See: Real-Time Tracking of Non-Rigid Objects using Mean Shift */
+/* Comanicius, Ramesh, Meer, 2000, 8p */
+/* http://citeseer.ist.psu.edu/321441.html */
+CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerMSPF();
+
+/* =========== tracker integrators trackers =============*/
+
+/* Integrator based on Particle Filtering method: */
+//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPF();
+
+/* Rule based integrator: */
+//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIRB();
+
+/* Integrator based on data fusion using particle filtering: */
+//CV_EXPORTS CvBlobTracker* cvCreateBlobTrackerIPFDF();
+
+
+
+
+/* Trajectory postprocessing module: */
+class CV_EXPORTS CvBlobTrackPostProc: public CvVSModule
+{
+public:
+ CvBlobTrackPostProc(){SetTypeName("BlobTrackPostProc");};
+ virtual void AddBlob(CvBlob* pBlob) = 0;
+ virtual void Process() = 0;
+ virtual int GetBlobNum() = 0;
+ virtual CvBlob* GetBlob(int index) = 0;
+ virtual void Release() = 0;
+
+ /* Additional functionality: */
+ virtual CvBlob* GetBlobByID(int BlobID)
+ {
+ int i;
+ for(i=GetBlobNum();i>0;i--)
+ {
+ CvBlob* pB=GetBlob(i-1);
+ if(pB->ID==BlobID) return pB;
+ }
+ return NULL;
+ };
+};
+
+inline void cvReleaseBlobTrackPostProc(CvBlobTrackPostProc** pBTPP)
+{
+ if(pBTPP == NULL) return;
+ if(*pBTPP)(*pBTPP)->Release();
+ *pBTPP = 0;
+}
+
+/* Trajectory generation module: */
+class CV_EXPORTS CvBlobTrackPostProcOne: public CvVSModule
+{
+public:
+ CvBlobTrackPostProcOne(){SetTypeName("BlobTrackPostOne");};
+ virtual CvBlob* Process(CvBlob* pBlob) = 0;
+ virtual void Release() = 0;
+};
+
+/* Create blob tracking post processing module based on simple module: */
+CV_EXPORTS CvBlobTrackPostProc* cvCreateBlobTrackPostProcList(CvBlobTrackPostProcOne* (*create)());
+
+
+/* Declarations of constructors of implemented modules: */
+CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcKalman();
+CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcTimeAverRect();
+CV_EXPORTS CvBlobTrackPostProc* cvCreateModuleBlobTrackPostProcTimeAverExp();
+
+
+/* PREDICTORS */
+/* blob PREDICTOR */
+class CvBlobTrackPredictor: public CvVSModule
+{
+public:
+ CvBlobTrackPredictor(){SetTypeName("BlobTrackPredictor");};
+ virtual CvBlob* Predict() = 0;
+ virtual void Update(CvBlob* pBlob) = 0;
+ virtual void Release() = 0;
+};
+CV_EXPORTS CvBlobTrackPredictor* cvCreateModuleBlobTrackPredictKalman();
+
+
+
+/* Trajectory analyser module: */
+class CV_EXPORTS CvBlobTrackAnalysis: public CvVSModule
+{
+public:
+ CvBlobTrackAnalysis(){SetTypeName("BlobTrackAnalysis");};
+ virtual void AddBlob(CvBlob* pBlob) = 0;
+ virtual void Process(IplImage* pImg, IplImage* pFG) = 0;
+ virtual float GetState(int BlobID) = 0;
+ /* return 0 if trajectory is normal
+ return >0 if trajectory abnormal */
+ virtual const char* GetStateDesc(int /*BlobID*/){return NULL;};
+ virtual void SetFileName(char* /*DataBaseName*/){};
+ virtual void Release() = 0;
+};
+
+
+inline void cvReleaseBlobTrackAnalysis(CvBlobTrackAnalysis** pBTPP)
+{
+ if(pBTPP == NULL) return;
+ if(*pBTPP)(*pBTPP)->Release();
+ *pBTPP = 0;
+}
+
+/* Feature-vector generation module: */
+class CV_EXPORTS CvBlobTrackFVGen : public CvVSModule
+{
+public:
+ CvBlobTrackFVGen(){SetTypeName("BlobTrackFVGen");};
+ virtual void AddBlob(CvBlob* pBlob) = 0;
+ virtual void Process(IplImage* pImg, IplImage* pFG) = 0;
+ virtual void Release() = 0;
+ virtual int GetFVSize() = 0;
+ virtual int GetFVNum() = 0;
+ virtual float* GetFV(int index, int* pFVID) = 0; /* Returns pointer to FV, if return 0 then FV not created */
+ virtual float* GetFVVar(){return NULL;}; /* Returns pointer to array of variation of values of FV, if returns 0 then FVVar does not exist. */
+ virtual float* GetFVMin() = 0; /* Returns pointer to array of minimal values of FV, if returns 0 then FVrange does not exist */
+ virtual float* GetFVMax() = 0; /* Returns pointer to array of maximal values of FV, if returns 0 then FVrange does not exist */
+};
+
+
+/* Trajectory Analyser module: */
+class CV_EXPORTS CvBlobTrackAnalysisOne
+{
+public:
+ virtual ~CvBlobTrackAnalysisOne() {};
+ virtual int Process(CvBlob* pBlob, IplImage* pImg, IplImage* pFG) = 0;
+ /* return 0 if trajectory is normal
+ return >0 if trajectory abnormal */
+ virtual void Release() = 0;
+};
+
+/* Create blob trajectory analysis module based on simple module: */
+CV_EXPORTS CvBlobTrackAnalysis* cvCreateBlobTrackAnalysisList(CvBlobTrackAnalysisOne* (*create)());
+
+/* Declarations of constructors of implemented modules: */
+
+/* Based on histogram analysis of 2D FV (x,y): */
+CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistP();
+
+/* Based on histogram analysis of 4D FV (x,y,vx,vy): */
+CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPV();
+
+/* Based on histogram analysis of 5D FV (x,y,vx,vy,state): */
+CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistPVS();
+
+/* Based on histogram analysis of 4D FV (startpos,stoppos): */
+CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisHistSS();
+
+
+
+/* Based on SVM classifier analysis of 2D FV (x,y): */
+//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP();
+
+/* Based on SVM classifier analysis of 4D FV (x,y,vx,vy): */
+//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPV();
+
+/* Based on SVM classifier analysis of 5D FV (x,y,vx,vy,state): */
+//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPVS();
+
+/* Based on SVM classifier analysis of 4D FV (startpos,stoppos): */
+//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS();
+
+/* Track analysis based on distance between tracks: */
+CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisTrackDist();
+
+/* Analyzer based on relation between road and height map: */
+//CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysis3DRoadMap();
+
+/* Analyzer that makes OR decision using set of analyzers: */
+CV_EXPORTS CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisIOR();
+
+/* Estimator of human height: */
+class CV_EXPORTS CvBlobTrackAnalysisHeight: public CvBlobTrackAnalysis
+{
+public:
+ virtual double GetHeight(CvBlob* pB) = 0;
+};
+//CV_EXPORTS CvBlobTrackAnalysisHeight* cvCreateModuleBlobTrackAnalysisHeightScale();
+
+
+
+/* AUTO BLOB TRACKER INTERFACE -- pipeline of 3 modules: */
+class CV_EXPORTS CvBlobTrackerAuto: public CvVSModule
+{
+public:
+ CvBlobTrackerAuto(){SetTypeName("BlobTrackerAuto");};
+ virtual void Process(IplImage* pImg, IplImage* pMask = NULL) = 0;
+ virtual CvBlob* GetBlob(int index) = 0;
+ virtual CvBlob* GetBlobByID(int ID) = 0;
+ virtual int GetBlobNum() = 0;
+ virtual IplImage* GetFGMask(){return NULL;};
+ virtual float GetState(int BlobID) = 0;
+ virtual const char* GetStateDesc(int BlobID) = 0;
+ /* return 0 if trajectory is normal;
+ * return >0 if trajectory abnormal. */
+ virtual void Release() = 0;
+};
+inline void cvReleaseBlobTrackerAuto(CvBlobTrackerAuto** ppT)
+{
+ ppT[0]->Release();
+ ppT[0] = 0;
+}
+/* END AUTO BLOB TRACKER INTERFACE */
+
+
+/* Constructor functions and data for specific BlobTrackerAuto modules: */
+
+/* Parameters of blobtracker auto ver1: */
+struct CvBlobTrackerAutoParam1
+{
+ int FGTrainFrames; /* Number of frames needed for FG (foreground) detector to train. */
+
+ CvFGDetector* pFG; /* FGDetector module. If this field is NULL the Process FG mask is used. */
+
+ CvBlobDetector* pBD; /* Selected blob detector module. */
+ /* If this field is NULL default blobdetector module will be created. */
+
+ CvBlobTracker* pBT; /* Selected blob tracking module. */
+ /* If this field is NULL default blobtracker module will be created. */
+
+ CvBlobTrackGen* pBTGen; /* Selected blob trajectory generator. */
+ /* If this field is NULL no generator is used. */
+
+ CvBlobTrackPostProc* pBTPP; /* Selected blob trajectory postprocessing module. */
+ /* If this field is NULL no postprocessing is done. */
+
+ int UsePPData;
+
+ CvBlobTrackAnalysis* pBTA; /* Selected blob trajectory analysis module. */
+ /* If this field is NULL no track analysis is done. */
+};
+
+/* Create blob tracker auto ver1: */
+CV_EXPORTS CvBlobTrackerAuto* cvCreateBlobTrackerAuto1(CvBlobTrackerAutoParam1* param = NULL);
+
+/* Simple loader for many auto trackers by its type : */
+inline CvBlobTrackerAuto* cvCreateBlobTrackerAuto(int type, void* param)
+{
+ if(type == 0) return cvCreateBlobTrackerAuto1((CvBlobTrackerAutoParam1*)param);
+ return 0;
+}
+
+
+
+struct CvTracksTimePos
+{
+ int len1,len2;
+ int beg1,beg2;
+ int end1,end2;
+ int comLen; //common length for two tracks
+ int shift1,shift2;
+};
+
+/*CV_EXPORTS int cvCompareTracks( CvBlobTrackSeq *groundTruth,
+ CvBlobTrackSeq *result,
+ FILE *file);*/
+
+
+/* Constructor functions: */
+
+CV_EXPORTS void cvCreateTracks_One(CvBlobTrackSeq *TS);
+CV_EXPORTS void cvCreateTracks_Same(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2);
+CV_EXPORTS void cvCreateTracks_AreaErr(CvBlobTrackSeq *TS1, CvBlobTrackSeq *TS2, int addW, int addH);
+
+
+/* HIST API */
+class CV_EXPORTS CvProb
+{
+public:
+ virtual ~CvProb() {};
+
+ /* Calculate probability value: */
+ virtual double Value(int* /*comp*/, int /*x*/ = 0, int /*y*/ = 0){return -1;};
+
+ /* Update histogram Pnew = (1-W)*Pold + W*Padd */
+ /* W - weight of newly added prob */
+ /* comps - matrix of new feature vectors used to update prob */
+ virtual void AddFeature(float W, int* comps, int x =0, int y = 0) = 0;
+ virtual void Scale(float factor = 0, int x = -1, int y = -1) = 0;
+ virtual void Release() = 0;
+};
+inline void cvReleaseProb(CvProb** ppProb){ppProb[0]->Release();ppProb[0]=NULL;}
+/* HIST API */
+
+/* Some Prob: */
+CV_EXPORTS CvProb* cvCreateProbS(int dim, CvSize size, int sample_num);
+CV_EXPORTS CvProb* cvCreateProbMG(int dim, CvSize size, int sample_num);
+CV_EXPORTS CvProb* cvCreateProbMG2(int dim, CvSize size, int sample_num);
+CV_EXPORTS CvProb* cvCreateProbHist(int dim, CvSize size);
+
+#define CV_BT_HIST_TYPE_S 0
+#define CV_BT_HIST_TYPE_MG 1
+#define CV_BT_HIST_TYPE_MG2 2
+#define CV_BT_HIST_TYPE_H 3
+inline CvProb* cvCreateProb(int type, int dim, CvSize size = cvSize(1,1), void* /*param*/ = NULL)
+{
+ if(type == CV_BT_HIST_TYPE_S) return cvCreateProbS(dim, size, -1);
+ if(type == CV_BT_HIST_TYPE_MG) return cvCreateProbMG(dim, size, -1);
+ if(type == CV_BT_HIST_TYPE_MG2) return cvCreateProbMG2(dim, size, -1);
+ if(type == CV_BT_HIST_TYPE_H) return cvCreateProbHist(dim, size);
+ return NULL;
+}
+
+
+
+/* Noise type definitions: */
+#define CV_NOISE_NONE 0
+#define CV_NOISE_GAUSSIAN 1
+#define CV_NOISE_UNIFORM 2
+#define CV_NOISE_SPECKLE 3
+#define CV_NOISE_SALT_AND_PEPPER 4
+
+/* Add some noise to image: */
+/* pImg - (input) image without noise */
+/* pImg - (output) image with noise */
+/* noise_type - type of added noise */
+/* CV_NOISE_GAUSSIAN - pImg += n , n - is gaussian noise with Ampl standard deviation */
+/* CV_NOISE_UNIFORM - pImg += n , n - is uniform noise with Ampl standard deviation */
+/* CV_NOISE_SPECKLE - pImg += n*pImg , n - is gaussian noise with Ampl standard deviation */
+/* CV_NOISE_SALT_AND_PEPPER - pImg = pImg with blacked and whited pixels,
+ Ampl is density of broken pixels (0 - no broken pixels, 1 - all pixels are broken)*/
+/* Ampl - "amplitude" of noise */
+//CV_EXPORTS void cvAddNoise(IplImage* pImg, int noise_type, double Ampl, CvRNG* rnd_state = NULL);
+
+/*================== GENERATOR OF TEST VIDEO SEQUENCE ===================== */
+typedef void CvTestSeq;
+
+/* pConfigfile - Name of file (yml or xml) with description of test sequence */
+/* videos - array of names of test videos described in "pConfigfile" file */
+/* numvideos - size of "videos" array */
+CV_EXPORTS CvTestSeq* cvCreateTestSeq(char* pConfigfile, char** videos, int numvideo, float Scale = 1, int noise_type = CV_NOISE_NONE, double noise_ampl = 0);
+CV_EXPORTS void cvReleaseTestSeq(CvTestSeq** ppTestSeq);
+
+/* Generate next frame from test video seq and return pointer to it: */
+CV_EXPORTS IplImage* cvTestSeqQueryFrame(CvTestSeq* pTestSeq);
+
+/* Return pointer to current foreground mask: */
+CV_EXPORTS IplImage* cvTestSeqGetFGMask(CvTestSeq* pTestSeq);
+
+/* Return pointer to current image: */
+CV_EXPORTS IplImage* cvTestSeqGetImage(CvTestSeq* pTestSeq);
+
+/* Return frame size of result test video: */
+CV_EXPORTS CvSize cvTestSeqGetImageSize(CvTestSeq* pTestSeq);
+
+/* Return number of frames in result test video: */
+CV_EXPORTS int cvTestSeqFrameNum(CvTestSeq* pTestSeq);
+
+/* Return number of existing objects.
+ * This is the total number of objects of any kind.
+ * For example, the number of trajectories may be equal to or less than the returned value:
+ */
+CV_EXPORTS int cvTestSeqGetObjectNum(CvTestSeq* pTestSeq);
+
+/* Return 0 if there is no position defined for the object on the current frame */
+/* Return 1 if there is object position and pPos was filled */
+CV_EXPORTS int cvTestSeqGetObjectPos(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pPos);
+CV_EXPORTS int cvTestSeqGetObjectSize(CvTestSeq* pTestSeq, int ObjIndex, CvPoint2D32f* pSize);
+
+/* Add noise to final image: */
+CV_EXPORTS void cvTestSeqAddNoise(CvTestSeq* pTestSeq, int noise_type = CV_NOISE_NONE, double noise_ampl = 0);
+
+/* Add Intensity variation: */
+CV_EXPORTS void cvTestSeqAddIntensityVariation(CvTestSeq* pTestSeq, float DI_per_frame, float MinI, float MaxI);
+CV_EXPORTS void cvTestSeqSetFrame(CvTestSeq* pTestSeq, int n);
+
+#endif
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp
new file mode 100644
index 00000000..5b5495ed
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/compat.hpp
@@ -0,0 +1,740 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright( C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+//(including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort(including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+/*
+ A few macros and definitions for backward compatibility
+ with the previous versions of OpenCV. They are obsolete and
+ are likely to be removed in future. To check whether your code
+ uses any of these, define CV_NO_BACKWARD_COMPATIBILITY before
+ including cv.h.
+*/
+
+#ifndef __OPENCV_COMPAT_HPP__
+#define __OPENCV_COMPAT_HPP__
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/imgproc/types_c.h"
+
+#include <math.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int CvMatType;
+typedef int CvDisMaskType;
+typedef CvMat CvMatArray;
+
+typedef int CvThreshType;
+typedef int CvAdaptiveThreshMethod;
+typedef int CvCompareMethod;
+typedef int CvFontFace;
+typedef int CvPolyApproxMethod;
+typedef int CvContoursMatchMethod;
+typedef int CvContourTreesMatchMethod;
+typedef int CvCoeffType;
+typedef int CvRodriguesType;
+typedef int CvElementShape;
+typedef int CvMorphOp;
+typedef int CvTemplMatchMethod;
+
+typedef CvPoint2D64f CvPoint2D64d;
+typedef CvPoint3D64f CvPoint3D64d;
+
+enum
+{
+ CV_MAT32F = CV_32FC1,
+ CV_MAT3x1_32F = CV_32FC1,
+ CV_MAT4x1_32F = CV_32FC1,
+ CV_MAT3x3_32F = CV_32FC1,
+ CV_MAT4x4_32F = CV_32FC1,
+
+ CV_MAT64D = CV_64FC1,
+ CV_MAT3x1_64D = CV_64FC1,
+ CV_MAT4x1_64D = CV_64FC1,
+ CV_MAT3x3_64D = CV_64FC1,
+ CV_MAT4x4_64D = CV_64FC1
+};
+
+enum
+{
+ IPL_GAUSSIAN_5x5 = 7
+};
+
+typedef CvBox2D CvBox2D32f;
+
+/* allocation/deallocation macros */
+#define cvCreateImageData cvCreateData
+#define cvReleaseImageData cvReleaseData
+#define cvSetImageData cvSetData
+#define cvGetImageRawData cvGetRawData
+
+#define cvmAlloc cvCreateData
+#define cvmFree cvReleaseData
+#define cvmAllocArray cvCreateData
+#define cvmFreeArray cvReleaseData
+
+#define cvIntegralImage cvIntegral
+#define cvMatchContours cvMatchShapes
+
+CV_EXPORTS CvMat cvMatArray( int rows, int cols, int type,
+ int count, void* data CV_DEFAULT(0));
+
+#define cvUpdateMHIByTime cvUpdateMotionHistory
+
+#define cvAccMask cvAcc
+#define cvSquareAccMask cvSquareAcc
+#define cvMultiplyAccMask cvMultiplyAcc
+#define cvRunningAvgMask(imgY, imgU, mask, alpha) cvRunningAvg(imgY, imgU, alpha, mask)
+
+#define cvSetHistThresh cvSetHistBinRanges
+#define cvCalcHistMask(img, mask, hist, doNotClear) cvCalcHist(img, hist, doNotClear, mask)
+
+CV_EXPORTS double cvMean( const CvArr* image, const CvArr* mask CV_DEFAULT(0));
+CV_EXPORTS double cvSumPixels( const CvArr* image );
+CV_EXPORTS void cvMean_StdDev( const CvArr* image, double* mean, double* sdv,
+ const CvArr* mask CV_DEFAULT(0));
+
+CV_EXPORTS void cvmPerspectiveProject( const CvMat* mat, const CvArr* src, CvArr* dst );
+CV_EXPORTS void cvFillImage( CvArr* mat, double color );
+
+#define cvCvtPixToPlane cvSplit
+#define cvCvtPlaneToPix cvMerge
+
+typedef struct CvRandState
+{
+ CvRNG state; /* RNG state (the current seed and carry)*/
+ int disttype; /* distribution type */
+ CvScalar param[2]; /* parameters of RNG */
+} CvRandState;
+
+/* Changes RNG range while preserving RNG state */
+CV_EXPORTS void cvRandSetRange( CvRandState* state, double param1,
+ double param2, int index CV_DEFAULT(-1));
+
+CV_EXPORTS void cvRandInit( CvRandState* state, double param1,
+ double param2, int seed,
+ int disttype CV_DEFAULT(CV_RAND_UNI));
+
+/* Fills array with random numbers */
+CV_EXPORTS void cvRand( CvRandState* state, CvArr* arr );
+
+#define cvRandNext( _state ) cvRandInt( &(_state)->state )
+
+CV_EXPORTS void cvbRand( CvRandState* state, float* dst, int len );
+
+CV_EXPORTS void cvbCartToPolar( const float* y, const float* x,
+ float* magnitude, float* angle, int len );
+CV_EXPORTS void cvbFastArctan( const float* y, const float* x, float* angle, int len );
+CV_EXPORTS void cvbSqrt( const float* x, float* y, int len );
+CV_EXPORTS void cvbInvSqrt( const float* x, float* y, int len );
+CV_EXPORTS void cvbReciprocal( const float* x, float* y, int len );
+CV_EXPORTS void cvbFastExp( const float* x, double* y, int len );
+CV_EXPORTS void cvbFastLog( const double* x, float* y, int len );
+
+CV_EXPORTS CvRect cvContourBoundingRect( void* point_set, int update CV_DEFAULT(0));
+
+CV_EXPORTS double cvPseudoInverse( const CvArr* src, CvArr* dst );
+#define cvPseudoInv cvPseudoInverse
+
+#define cvContourMoments( contour, moments ) cvMoments( contour, moments, 0 )
+
+#define cvGetPtrAt cvPtr2D
+#define cvGetAt cvGet2D
+#define cvSetAt(arr,val,y,x) cvSet2D((arr),(y),(x),(val))
+
+#define cvMeanMask cvMean
+#define cvMean_StdDevMask(img,mask,mean,sdv) cvMean_StdDev(img,mean,sdv,mask)
+
+#define cvNormMask(imgA,imgB,mask,normType) cvNorm(imgA,imgB,normType,mask)
+
+#define cvMinMaxLocMask(img, mask, min_val, max_val, min_loc, max_loc) \
+ cvMinMaxLoc(img, min_val, max_val, min_loc, max_loc, mask)
+
+#define cvRemoveMemoryManager cvSetMemoryManager
+
+#define cvmSetZero( mat ) cvSetZero( mat )
+#define cvmSetIdentity( mat ) cvSetIdentity( mat )
+#define cvmAdd( src1, src2, dst ) cvAdd( src1, src2, dst, 0 )
+#define cvmSub( src1, src2, dst ) cvSub( src1, src2, dst, 0 )
+#define cvmCopy( src, dst ) cvCopy( src, dst, 0 )
+#define cvmMul( src1, src2, dst ) cvMatMulAdd( src1, src2, 0, dst )
+#define cvmTranspose( src, dst ) cvT( src, dst )
+#define cvmInvert( src, dst ) cvInv( src, dst )
+#define cvmMahalanobis(vec1, vec2, mat) cvMahalanobis( vec1, vec2, mat )
+#define cvmDotProduct( vec1, vec2 ) cvDotProduct( vec1, vec2 )
+#define cvmCrossProduct(vec1, vec2,dst) cvCrossProduct( vec1, vec2, dst )
+#define cvmTrace( mat ) (cvTrace( mat )).val[0]
+#define cvmMulTransposed( src, dst, order ) cvMulTransposed( src, dst, order )
+#define cvmEigenVV( mat, evec, eval, eps) cvEigenVV( mat, evec, eval, eps )
+#define cvmDet( mat ) cvDet( mat )
+#define cvmScale( src, dst, scale ) cvScale( src, dst, scale )
+
+#define cvCopyImage( src, dst ) cvCopy( src, dst, 0 )
+#define cvReleaseMatHeader cvReleaseMat
+
+/* Calculates exact convex hull of 2d point set */
+CV_EXPORTS void cvConvexHull( CvPoint* points, int num_points,
+ CvRect* bound_rect,
+ int orientation, int* hull, int* hullsize );
+
+
+CV_EXPORTS void cvMinAreaRect( CvPoint* points, int n,
+ int left, int bottom,
+ int right, int top,
+ CvPoint2D32f* anchor,
+ CvPoint2D32f* vect1,
+ CvPoint2D32f* vect2 );
+
+typedef int CvDisType;
+typedef int CvChainApproxMethod;
+typedef int CvContourRetrievalMode;
+
+CV_EXPORTS void cvFitLine3D( CvPoint3D32f* points, int count, int dist,
+ void *param, float reps, float aeps, float* line );
+
+/* Fits a line into set of 2d points in a robust way (M-estimator technique) */
+CV_EXPORTS void cvFitLine2D( CvPoint2D32f* points, int count, int dist,
+ void *param, float reps, float aeps, float* line );
+
+CV_EXPORTS void cvFitEllipse( const CvPoint2D32f* points, int count, CvBox2D* box );
+
+/* Projects 2d points to one of standard coordinate planes
+ (i.e. removes one of coordinates) */
+CV_EXPORTS void cvProject3D( CvPoint3D32f* points3D, int count,
+ CvPoint2D32f* points2D,
+ int xIndx CV_DEFAULT(0),
+ int yIndx CV_DEFAULT(1));
+
+/* Retrieves value of the particular bin
+ of x-dimensional (x=1,2,3,...) histogram */
+#define cvQueryHistValue_1D( hist, idx0 ) \
+ ((float)cvGetReal1D( (hist)->bins, (idx0)))
+#define cvQueryHistValue_2D( hist, idx0, idx1 ) \
+ ((float)cvGetReal2D( (hist)->bins, (idx0), (idx1)))
+#define cvQueryHistValue_3D( hist, idx0, idx1, idx2 ) \
+ ((float)cvGetReal3D( (hist)->bins, (idx0), (idx1), (idx2)))
+#define cvQueryHistValue_nD( hist, idx ) \
+ ((float)cvGetRealND( (hist)->bins, (idx)))
+
+/* Returns pointer to the particular bin of x-dimensional histogram.
+ For sparse histogram the bin is created if it didn't exist before */
+#define cvGetHistValue_1D( hist, idx0 ) \
+ ((float*)cvPtr1D( (hist)->bins, (idx0), 0))
+#define cvGetHistValue_2D( hist, idx0, idx1 ) \
+ ((float*)cvPtr2D( (hist)->bins, (idx0), (idx1), 0))
+#define cvGetHistValue_3D( hist, idx0, idx1, idx2 ) \
+ ((float*)cvPtr3D( (hist)->bins, (idx0), (idx1), (idx2), 0))
+#define cvGetHistValue_nD( hist, idx ) \
+ ((float*)cvPtrND( (hist)->bins, (idx), 0))
+
+
+#define CV_IS_SET_ELEM_EXISTS CV_IS_SET_ELEM
+
+
+CV_EXPORTS int cvHoughLines( CvArr* image, double rho,
+ double theta, int threshold,
+ float* lines, int linesNumber );
+
+CV_EXPORTS int cvHoughLinesP( CvArr* image, double rho,
+ double theta, int threshold,
+ int lineLength, int lineGap,
+ int* lines, int linesNumber );
+
+
+CV_EXPORTS int cvHoughLinesSDiv( CvArr* image, double rho, int srn,
+ double theta, int stn, int threshold,
+ float* lines, int linesNumber );
+
+CV_EXPORTS float cvCalcEMD( const float* signature1, int size1,
+ const float* signature2, int size2,
+ int dims, int dist_type CV_DEFAULT(CV_DIST_L2),
+ CvDistanceFunction dist_func CV_DEFAULT(0),
+ float* lower_bound CV_DEFAULT(0),
+ void* user_param CV_DEFAULT(0));
+
+CV_EXPORTS void cvKMeans( int num_clusters, float** samples,
+ int num_samples, int vec_size,
+ CvTermCriteria termcrit, int* cluster_idx );
+
+CV_EXPORTS void cvStartScanGraph( CvGraph* graph, CvGraphScanner* scanner,
+ CvGraphVtx* vtx CV_DEFAULT(NULL),
+ int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS));
+
+CV_EXPORTS void cvEndScanGraph( CvGraphScanner* scanner );
+
+
+/* old drawing functions */
+CV_EXPORTS void cvLineAA( CvArr* img, CvPoint pt1, CvPoint pt2,
+ double color, int scale CV_DEFAULT(0));
+
+CV_EXPORTS void cvCircleAA( CvArr* img, CvPoint center, int radius,
+ double color, int scale CV_DEFAULT(0) );
+
+CV_EXPORTS void cvEllipseAA( CvArr* img, CvPoint center, CvSize axes,
+ double angle, double start_angle,
+ double end_angle, double color,
+ int scale CV_DEFAULT(0) );
+
+CV_EXPORTS void cvPolyLineAA( CvArr* img, CvPoint** pts, int* npts, int contours,
+ int is_closed, double color, int scale CV_DEFAULT(0) );
+
+/****************************************************************************************\
+* Pixel Access Macros *
+\****************************************************************************************/
+
+typedef struct _CvPixelPosition8u
+{
+ uchar* currline; /* pointer to the start of the current pixel line */
+ uchar* topline; /* pointer to the start of the top pixel line */
+ uchar* bottomline; /* pointer to the start of the first line */
+ /* which is below the image */
+ int x; /* current x coordinate ( in pixels ) */
+ int width; /* width of the image ( in pixels ) */
+ int height; /* height of the image ( in pixels ) */
+ int step; /* distance between lines ( in elements of single */
+ /* plane ) */
+ int step_arr[3]; /* array: ( 0, -step, step ). It is used for */
+ /* vertical moving */
+} CvPixelPosition8u;
+
+/* this structure differs from the above only in data type */
+typedef struct _CvPixelPosition8s
+{
+ schar* currline;
+ schar* topline;
+ schar* bottomline;
+ int x;
+ int width;
+ int height;
+ int step;
+ int step_arr[3];
+} CvPixelPosition8s;
+
+/* this structure differs from the CvPixelPosition8u only in data type */
+typedef struct _CvPixelPosition32f
+{
+ float* currline;
+ float* topline;
+ float* bottomline;
+ int x;
+ int width;
+ int height;
+ int step;
+ int step_arr[3];
+} CvPixelPosition32f;
+
+
+/* Initialize one of the CvPixelPosition structures. */
+/* pos - initialized structure */
+/* origin - pointer to the left-top corner of the ROI */
+/* step - width of the whole image in bytes */
+/* roi - width & height of the ROI */
+/* x, y - initial position */
+#define CV_INIT_PIXEL_POS(pos, origin, _step, roi, _x, _y, orientation) \
+ ( \
+ (pos).step = (_step)/sizeof((pos).currline[0]) * (orientation ? -1 : 1), \
+ (pos).width = (roi).width, \
+ (pos).height = (roi).height, \
+ (pos).bottomline = (origin) + (pos).step*(pos).height, \
+ (pos).topline = (origin) - (pos).step, \
+ (pos).step_arr[0] = 0, \
+ (pos).step_arr[1] = -(pos).step, \
+ (pos).step_arr[2] = (pos).step, \
+ (pos).x = (_x), \
+ (pos).currline = (origin) + (pos).step*(_y) )
+
+
+/* Move to specified point ( absolute shift ) */
+/* pos - position structure */
+/* x, y - coordinates of the new position */
+/* cs - number of the image channels */
+#define CV_MOVE_TO( pos, _x, _y, cs ) \
+((pos).currline = (_y) >= 0 && (_y) < (pos).height ? (pos).topline + ((_y)+1)*(pos).step : 0, \
+ (pos).x = (_x) >= 0 && (_x) < (pos).width ? (_x) : 0, (pos).currline + (_x) * (cs) )
+
+/* Get current coordinates */
+/* pos - position structure */
+/* x, y - coordinates of the new position */
+/* cs - number of the image channels */
+#define CV_GET_CURRENT( pos, cs ) ((pos).currline + (pos).x * (cs))
+
+/* Move by one pixel relatively to current position */
+/* pos - position structure */
+/* cs - number of the image channels */
+
+/* left */
+#define CV_MOVE_LEFT( pos, cs ) \
+ ( --(pos).x >= 0 ? (pos).currline + (pos).x*(cs) : 0 )
+
+/* right */
+#define CV_MOVE_RIGHT( pos, cs ) \
+ ( ++(pos).x < (pos).width ? (pos).currline + (pos).x*(cs) : 0 )
+
+/* up */
+/* up */
+/* NOTE(review): every CV_MOVE_* macro below mutates `pos` in place and
+   evaluates its arguments more than once -- pass only side-effect-free
+   lvalues. The non-WRAP UP/DOWN forms yield 0 once the boundary line
+   (topline/bottomline) is reached. */
+#define CV_MOVE_UP( pos, cs ) \
+ (((pos).currline -= (pos).step) != (pos).topline ? (pos).currline + (pos).x*(cs) : 0 )
+
+/* down */
+#define CV_MOVE_DOWN( pos, cs ) \
+ (((pos).currline += (pos).step) != (pos).bottomline ? (pos).currline + (pos).x*(cs) : 0 )
+
+/* left up */
+/* Diagonal moves chain two macros with the comma operator: the horizontal
+   move runs first and its value is discarded, so the expression value is
+   that of the vertical move. */
+#define CV_MOVE_LU( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_UP(pos, cs))
+
+/* right up */
+#define CV_MOVE_RU( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_UP(pos, cs))
+
+/* left down */
+#define CV_MOVE_LD( pos, cs ) ( CV_MOVE_LEFT(pos, cs), CV_MOVE_DOWN(pos, cs))
+
+/* right down */
+#define CV_MOVE_RD( pos, cs ) ( CV_MOVE_RIGHT(pos, cs), CV_MOVE_DOWN(pos, cs))
+
+
+
+/* Move by one pixel relatively to current position with wrapping when the position */
+/* achieves image boundary */
+/* pos - position structure */
+/* cs - number of the image channels */
+
+/* left */
+#define CV_MOVE_LEFT_WRAP( pos, cs ) \
+ ((pos).currline + ( --(pos).x >= 0 ? (pos).x : ((pos).x = (pos).width-1))*(cs))
+
+/* right */
+#define CV_MOVE_RIGHT_WRAP( pos, cs ) \
+ ((pos).currline + ( ++(pos).x < (pos).width ? (pos).x : ((pos).x = 0))*(cs) )
+
+/* up */
+#define CV_MOVE_UP_WRAP( pos, cs ) \
+ ((((pos).currline -= (pos).step) != (pos).topline ? \
+ (pos).currline : ((pos).currline = (pos).bottomline - (pos).step)) + (pos).x*(cs) )
+
+/* down */
+#define CV_MOVE_DOWN_WRAP( pos, cs ) \
+ ((((pos).currline += (pos).step) != (pos).bottomline ? \
+ (pos).currline : ((pos).currline = (pos).topline + (pos).step)) + (pos).x*(cs) )
+
+/* left up */
+#define CV_MOVE_LU_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs))
+/* right up */
+#define CV_MOVE_RU_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_UP_WRAP(pos, cs))
+/* left down */
+#define CV_MOVE_LD_WRAP( pos, cs ) ( CV_MOVE_LEFT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs))
+/* right down */
+#define CV_MOVE_RD_WRAP( pos, cs ) ( CV_MOVE_RIGHT_WRAP(pos, cs), CV_MOVE_DOWN_WRAP(pos, cs))
+
+/* Numeric constants which used for moving in arbitrary direction */
+/* Encoding (see CV_MOVE_PARAM below): (shift>>2) indexes (pos).step_arr to
+   pick the vertical step; ((shift)&3)-2 is the horizontal delta added to
+   (pos).x (-1 left, 0 none, +1 right). */
+enum
+{
+ CV_SHIFT_NONE = 2,
+ CV_SHIFT_LEFT = 1,
+ CV_SHIFT_RIGHT = 3,
+ CV_SHIFT_UP = 6,
+ CV_SHIFT_DOWN = 10,
+ CV_SHIFT_LU = 5,
+ CV_SHIFT_RU = 7,
+ CV_SHIFT_LD = 9,
+ CV_SHIFT_RD = 11
+};
+
+/* Move by one pixel in specified direction */
+/* pos - position structure */
+/* shift - direction ( its value must be one of the CV_SHIFT_* constants ) */
+/* cs - number of the image channels */
+/* Yields 0 when the move leaves the image, otherwise currline + x*cs. */
+#define CV_MOVE_PARAM( pos, shift, cs ) \
+ ( (pos).currline += (pos).step_arr[(shift)>>2], (pos).x += ((shift)&3)-2, \
+ ((pos).currline != (pos).topline && (pos).currline != (pos).bottomline && \
+ (pos).x >= 0 && (pos).x < (pos).width) ? (pos).currline + (pos).x*(cs) : 0 )
+
+/* Move by one pixel in specified direction with wrapping when the */
+/* position achieves image boundary */
+/* pos - position structure */
+/* shift - direction ( its value must be one of the CV_SHIFT_* constants ) */
+/* cs - number of the image channels */
+#define CV_MOVE_PARAM_WRAP( pos, shift, cs ) \
+ ( (pos).currline += (pos).step_arr[(shift)>>2], \
+ (pos).currline = ((pos).currline == (pos).topline ? \
+ (pos).bottomline - (pos).step : \
+ (pos).currline == (pos).bottomline ? \
+ (pos).topline + (pos).step : (pos).currline), \
+ \
+ (pos).x += ((shift)&3)-2, \
+ (pos).x = ((pos).x < 0 ? (pos).width-1 : (pos).x >= (pos).width ? 0 : (pos).x), \
+ \
+ (pos).currline + (pos).x*(cs) )
+
+
+/* Aliases for raw float/double buffers used by the old calibration API. */
+typedef float* CvVect32f;
+typedef float* CvMatr32f;
+typedef double* CvVect64d;
+typedef double* CvMatr64d;
+
+/* Undistorts an image in a single call (legacy API). */
+CV_EXPORTS void cvUnDistortOnce( const CvArr* src, CvArr* dst,
+ const float* intrinsic_matrix,
+ const float* distortion_coeffs,
+ int interpolate );
+
+/* the two functions below have quite hackerish implementations, use with care
+ (or, which is better, switch to cvUndistortInitMap and cvRemap instead) */
+CV_EXPORTS void cvUnDistortInit( const CvArr* src,
+ CvArr* undistortion_map,
+ const float* A, const float* k,
+ int interpolate );
+
+CV_EXPORTS void cvUnDistort( const CvArr* src, CvArr* dst,
+ const CvArr* undistortion_map,
+ int interpolate );
+
+/* Find fundamental matrix */
+CV_EXPORTS void cvFindFundamentalMatrix( int* points1, int* points2,
+ int numpoints, int method, float* matrix );
+
+
+/* Finds initial guesses for the chessboard corner positions -- legacy helper,
+   presumably used to seed cvCalibrateCamera below (verify). */
+CV_EXPORTS int cvFindChessBoardCornerGuesses( const void* arr, void* thresharr,
+ CvMemStorage* storage,
+ CvSize pattern_size, CvPoint2D32f * corners,
+ int *corner_count );
+
+/* Calibrates camera using multiple views of calibration pattern */
+CV_EXPORTS void cvCalibrateCamera( int image_count, int* _point_counts,
+ CvSize image_size, CvPoint2D32f* _image_points, CvPoint3D32f* _object_points,
+ float* _distortion_coeffs, float* _camera_matrix, float* _translation_vectors,
+ float* _rotation_matrices, int flags );
+
+
+/* Same as cvCalibrateCamera but with double-precision parameters. */
+CV_EXPORTS void cvCalibrateCamera_64d( int image_count, int* _point_counts,
+ CvSize image_size, CvPoint2D64f* _image_points, CvPoint3D64f* _object_points,
+ double* _distortion_coeffs, double* _camera_matrix, double* _translation_vectors,
+ double* _rotation_matrices, int flags );
+
+
+/* Find 3d position of object given intrinsic camera parameters,
+ 3d model of the object and projection of the object into view plane */
+CV_EXPORTS void cvFindExtrinsicCameraParams( int point_count,
+ CvSize image_size, CvPoint2D32f* _image_points,
+ CvPoint3D32f* _object_points, float* focal_length,
+ CvPoint2D32f principal_point, float* _distortion_coeffs,
+ float* _rotation_vector, float* _translation_vector );
+
+/* Variant of the previous function that takes double-precision parameters */
+CV_EXPORTS void cvFindExtrinsicCameraParams_64d( int point_count,
+ CvSize image_size, CvPoint2D64f* _image_points,
+ CvPoint3D64f* _object_points, double* focal_length,
+ CvPoint2D64f principal_point, double* _distortion_coeffs,
+ double* _rotation_vector, double* _translation_vector );
+
+/* Rodrigues transform: conversion direction selector for cvRodrigues. */
+enum
+{
+ CV_RODRIGUES_M2V = 0, /* matrix to vector */
+ CV_RODRIGUES_V2M = 1 /* vector to matrix */
+};
+
+/* Converts rotation_matrix matrix to rotation_matrix vector or vice versa */
+CV_EXPORTS void cvRodrigues( CvMat* rotation_matrix, CvMat* rotation_vector,
+ CvMat* jacobian, int conv_type );
+
+/* Does reprojection of 3d object points to the view plane */
+CV_EXPORTS void cvProjectPoints( int point_count, CvPoint3D64f* _object_points,
+ double* _rotation_vector, double* _translation_vector,
+ double* focal_length, CvPoint2D64f principal_point,
+ double* _distortion, CvPoint2D64f* _image_points,
+ double* _deriv_points_rotation_matrix,
+ double* _deriv_points_translation_vect,
+ double* _deriv_points_focal,
+ double* _deriv_points_principal_point,
+ double* _deriv_points_distortion_coeffs );
+
+
+/* Simpler version of the previous function */
+CV_EXPORTS void cvProjectPointsSimple( int point_count, CvPoint3D64f* _object_points,
+ double* _rotation_matrix, double* _translation_vector,
+ double* _camera_matrix, double* _distortion, CvPoint2D64f* _image_points );
+
+
+/* Backward-compatibility aliases for renamed functions
+   (the last one keeps the historical misspelling alive). */
+#define cvMake2DPoints cvConvertPointsHomogeneous
+#define cvMake3DPoints cvConvertPointsHomogeneous
+
+#define cvWarpPerspectiveQMatrix cvGetPerspectiveTransform
+
+#define cvConvertPointsHomogenious cvConvertPointsHomogeneous
+
+
+//////////////////////////////////// feature extractors: obsolete API //////////////////////////////////
+
+/* A single SURF keypoint. */
+typedef struct CvSURFPoint
+{
+ CvPoint2D32f pt; /* position of the feature in the image */
+
+ int laplacian; /* sign of the laplacian at the point -- TODO confirm value range */
+ int size; /* size of the feature */
+ float dir; /* dominant orientation of the feature (units presumably degrees -- verify) */
+ float hessian; /* hessian response; presumably larger = stronger keypoint */
+
+} CvSURFPoint;
+
+CV_INLINE CvSURFPoint cvSURFPoint( CvPoint2D32f pt, int laplacian,
+ int size, float dir CV_DEFAULT(0),
+ float hessian CV_DEFAULT(0))
+{
+ /* Convenience constructor: packs the given values into a CvSURFPoint. */
+ CvSURFPoint keypoint;
+ keypoint.hessian = hessian;
+ keypoint.dir = dir;
+ keypoint.size = size;
+ keypoint.laplacian = laplacian;
+ keypoint.pt = pt;
+ return keypoint;
+}
+
+/* Parameters of the SURF detector/descriptor. */
+typedef struct CvSURFParams
+{
+ int extended; /* nonzero presumably selects the extended descriptor -- verify */
+ int upright; /* nonzero presumably skips orientation estimation -- verify */
+ double hessianThreshold; /* keypoints with response below this are presumably rejected */
+
+ int nOctaves; /* number of pyramid octaves */
+ int nOctaveLayers; /* number of layers within each octave */
+
+} CvSURFParams;
+
+/* Builds a CvSURFParams from the threshold and `extended` flag; the other
+   fields presumably receive default values (verify against implementation). */
+CVAPI(CvSURFParams) cvSURFParams( double hessianThreshold, int extended CV_DEFAULT(0) );
+
+// If useProvidedKeyPts!=0, keypoints are not detected, but descriptors are computed
+// at the locations provided in keypoints (a CvSeq of CvSURFPoint).
+CVAPI(void) cvExtractSURF( const CvArr* img, const CvArr* mask,
+ CvSeq** keypoints, CvSeq** descriptors,
+ CvMemStorage* storage, CvSURFParams params,
+ int useProvidedKeyPts CV_DEFAULT(0) );
+
+/*!
+ Maximal Stable Regions Parameters
+ */
+typedef struct CvMSERParams
+{
+ //! delta, in the code, it compares (size_{i}-size_{i-delta})/size_{i-delta}
+ int delta;
+ //! prune regions whose area is bigger than maxArea
+ int maxArea;
+ //! prune regions whose area is smaller than minArea
+ int minArea;
+ //! prune regions whose size is too similar to that of their children
+ float maxVariation;
+ //! trace back to cut off mser with diversity < min_diversity
+ float minDiversity;
+
+ /////// the next few params for MSER of color image
+
+ //! for color image, the evolution steps
+ int maxEvolution;
+ //! the area threshold to cause re-initialize
+ double areaThreshold;
+ //! ignore too small margin
+ double minMargin;
+ //! the aperture size for edge blur
+ int edgeBlurSize;
+} CvMSERParams;
+
+/* Builds a CvMSERParams value; every argument has a default (see CV_DEFAULT). */
+CVAPI(CvMSERParams) cvMSERParams( int delta CV_DEFAULT(5), int min_area CV_DEFAULT(60),
+ int max_area CV_DEFAULT(14400), float max_variation CV_DEFAULT(.25f),
+ float min_diversity CV_DEFAULT(.2f), int max_evolution CV_DEFAULT(200),
+ double area_threshold CV_DEFAULT(1.01),
+ double min_margin CV_DEFAULT(.003),
+ int edge_blur_size CV_DEFAULT(5) );
+
+// Extracts the contours of Maximally Stable Extremal Regions
+CVAPI(void) cvExtractMSER( CvArr* _img, CvArr* _mask, CvSeq** contours, CvMemStorage* storage, CvMSERParams params );
+
+
+/* A single keypoint produced by the STAR detector. */
+typedef struct CvStarKeypoint
+{
+ CvPoint pt; /* position of the feature */
+ int size; /* size of the feature */
+ float response; /* detector response; presumably larger = stronger */
+} CvStarKeypoint;
+
+CV_INLINE CvStarKeypoint cvStarKeypoint(CvPoint pt, int size, float response)
+{
+ /* Convenience constructor for CvStarKeypoint. */
+ CvStarKeypoint result;
+ result.response = response;
+ result.size = size;
+ result.pt = pt;
+ return result;
+}
+
+/* Tuning parameters of the STAR keypoint detector. */
+typedef struct CvStarDetectorParams
+{
+ int maxSize; /* maximal feature size considered -- TODO confirm units */
+ int responseThreshold; /* minimal response to keep a keypoint (assumption) */
+ int lineThresholdProjected; /* line-suppression threshold (projected) -- verify semantics */
+ int lineThresholdBinarized; /* line-suppression threshold (binarized) -- verify semantics */
+ int suppressNonmaxSize; /* window size for non-maximum suppression (assumption) */
+} CvStarDetectorParams;
+
+CV_INLINE CvStarDetectorParams cvStarDetectorParams(
+ int maxSize CV_DEFAULT(45),
+ int responseThreshold CV_DEFAULT(30),
+ int lineThresholdProjected CV_DEFAULT(10),
+ int lineThresholdBinarized CV_DEFAULT(8),
+ int suppressNonmaxSize CV_DEFAULT(5))
+{
+ /* Packs the given detector settings into a CvStarDetectorParams value. */
+ CvStarDetectorParams cfg;
+ cfg.suppressNonmaxSize = suppressNonmaxSize;
+ cfg.lineThresholdBinarized = lineThresholdBinarized;
+ cfg.lineThresholdProjected = lineThresholdProjected;
+ cfg.responseThreshold = responseThreshold;
+ cfg.maxSize = maxSize;
+ return cfg;
+}
+
+/* Runs the STAR keypoint detector; the returned CvSeq is presumably a
+   sequence of CvStarKeypoint allocated from `storage` -- verify. */
+CVAPI(CvSeq*) cvGetStarKeypoints( const CvArr* img, CvMemStorage* storage,
+ CvStarDetectorParams params CV_DEFAULT(cvStarDetectorParams()));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp
new file mode 100644
index 00000000..96da25c9
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/legacy.hpp
@@ -0,0 +1,3436 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_LEGACY_HPP__
+#define __OPENCV_LEGACY_HPP__
+
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/calib3d/calib3d.hpp"
+#include "opencv2/ml/ml.hpp"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Segments the source image into regions; parameter names suggest a Canny
+   edge + flood-fill scheme (verify). Result contours come from `storage`. */
+CVAPI(CvSeq*) cvSegmentImage( const CvArr* srcarr, CvArr* dstarr,
+ double canny_threshold,
+ double ffill_threshold,
+ CvMemStorage* storage );
+
+/****************************************************************************************\
+* Eigen objects *
+\****************************************************************************************/
+
+/* Callback alternative to in-memory input/output; selected via the
+   CV_EIGOBJ_*_CALLBACK flags below (see the ioFlags parameters). */
+typedef int (CV_CDECL * CvCallback)(int index, void* buffer, void* user_data);
+typedef union
+{
+ CvCallback callback;
+ void* data;
+}
+CvInput;
+
+#define CV_EIGOBJ_NO_CALLBACK 0
+#define CV_EIGOBJ_INPUT_CALLBACK 1
+#define CV_EIGOBJ_OUTPUT_CALLBACK 2
+#define CV_EIGOBJ_BOTH_CALLBACK 3
+
+/* Calculates covariation matrix of a set of arrays */
+CVAPI(void) cvCalcCovarMatrixEx( int nObjects, void* input, int ioFlags,
+ int ioBufSize, uchar* buffer, void* userData,
+ IplImage* avg, float* covarMatrix );
+
+/* Calculates eigen values and vectors of covariation matrix of a set of
+ arrays */
+CVAPI(void) cvCalcEigenObjects( int nObjects, void* input, void* output,
+ int ioFlags, int ioBufSize, void* userData,
+ CvTermCriteria* calcLimit, IplImage* avg,
+ float* eigVals );
+
+/* Calculates dot product (obj - avg) * eigObj (i.e. projects image to eigen vector) */
+CVAPI(double) cvCalcDecompCoeff( IplImage* obj, IplImage* eigObj, IplImage* avg );
+
+/* Projects image to eigen space (finds all decomposition coefficients) */
+CVAPI(void) cvEigenDecomposite( IplImage* obj, int nEigObjs, void* eigInput,
+ int ioFlags, void* userData, IplImage* avg,
+ float* coeffs );
+
+/* Projects original objects used to calculate eigen space basis to that space */
+CVAPI(void) cvEigenProjection( void* eigInput, int nEigObjs, int ioFlags,
+ void* userData, float* coeffs, IplImage* avg,
+ IplImage* proj );
+
+/****************************************************************************************\
+* 1D/2D HMM *
+\****************************************************************************************/
+
+/* Observation vectors extracted from a single image. */
+typedef struct CvImgObsInfo
+{
+ int obs_x;
+ int obs_y;
+ int obs_size;
+ float* obs;//consecutive observations
+
+ int* state;/* arr of pairs superstate/state to which observation belong */
+ int* mix; /* number of mixture to which observation belong */
+
+} CvImgObsInfo;/*struct for 1 image*/
+
+typedef CvImgObsInfo Cv1DObsInfo;
+
+/* Gaussian-mixture parameters of one HMM state. */
+typedef struct CvEHMMState
+{
+ int num_mix; /*number of mixtures in this state*/
+ float* mu; /*mean vectors corresponding to each mixture*/
+ float* inv_var; /* square root of inverse variances corresp. to each mixture*/
+ float* log_var_val; /* sum of 0.5 (LN2PI + ln(variance[i]) ) for i=1,n */
+ float* weight; /*array of mixture weights. Sum of all weights in a state is 1. */
+
+} CvEHMMState;
+
+/* Embedded HMM: level 0 nodes hold real states, higher levels hold child HMMs
+   (see the union below). */
+typedef struct CvEHMM
+{
+ int level; /* 0 - lowest(i.e its states are real states), ..... */
+ int num_states; /* number of HMM states */
+ float* transP;/*transition probab. matrices for states */
+ float** obsProb; /* if level == 0 - array of prob matrices corresponding to hmm
+ if level == 1 - matrix of matrices */
+ union
+ {
+ CvEHMMState* state; /* if level == 0 points to real states array,
+ if not - points to embedded hmms */
+ struct CvEHMM* ehmm; /* pointer to an embedded model or NULL, if it is a leaf */
+ } u;
+
+} CvEHMM;
+
+/* 1D HMM API -- commented out upstream; kept for reference only. */
+/*CVAPI(int) icvCreate1DHMM( CvEHMM** this_hmm,
+ int state_number, int* num_mix, int obs_size );
+
+CVAPI(int) icvRelease1DHMM( CvEHMM** phmm );
+
+CVAPI(int) icvUniform1DSegm( Cv1DObsInfo* obs_info, CvEHMM* hmm );
+
+CVAPI(int) icvInit1DMixSegm( Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm);
+
+CVAPI(int) icvEstimate1DHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm);
+
+CVAPI(int) icvEstimate1DObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm );
+
+CVAPI(int) icvEstimate1DTransProb( Cv1DObsInfo** obs_info_array,
+ int num_seq,
+ CvEHMM* hmm );
+
+CVAPI(float) icvViterbi( Cv1DObsInfo* obs_info, CvEHMM* hmm);
+
+CVAPI(int) icv1DMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm );*/
+
+/*********************************** Embedded HMMs *************************************/
+
+/* Creates 2D HMM */
+CVAPI(CvEHMM*) cvCreate2DHMM( int* stateNumber, int* numMix, int obsSize );
+
+/* Releases HMM */
+CVAPI(void) cvRelease2DHMM( CvEHMM** hmm );
+
+/* Computes how many observation windows of size `win`, stepped by `delta`,
+   fit into `roi`. NOTE(review): multi-statement macro without a
+   do { } while(0) guard -- unsafe after a bare if/else. */
+#define CV_COUNT_OBS(roi, win, delta, numObs ) \
+{ \
+ (numObs)->width =((roi)->width -(win)->width +(delta)->width)/(delta)->width; \
+ (numObs)->height =((roi)->height -(win)->height +(delta)->height)/(delta)->height;\
+}
+
+/* Creates storage for observation vectors */
+CVAPI(CvImgObsInfo*) cvCreateObsInfo( CvSize numObs, int obsSize );
+
+/* Releases storage for observation vectors */
+CVAPI(void) cvReleaseObsInfo( CvImgObsInfo** obs_info );
+
+
+/* The function takes an image on input and returns the sequence of observations
+ to be used with an embedded HMM; Each observation is top-left block of DCT
+ coefficient matrix */
+CVAPI(void) cvImgToObs_DCT( const CvArr* arr, float* obs, CvSize dctSize,
+ CvSize obsSize, CvSize delta );
+
+
+/* Uniformly segments all observation vectors extracted from image */
+CVAPI(void) cvUniformImgSegm( CvImgObsInfo* obs_info, CvEHMM* ehmm );
+
+/* Does mixture segmentation of the states of embedded HMM */
+CVAPI(void) cvInitMixSegm( CvImgObsInfo** obs_info_array,
+ int num_img, CvEHMM* hmm );
+
+/* Function calculates means, variances, weights of every Gaussian mixture
+ of every low-level state of embedded HMM */
+CVAPI(void) cvEstimateHMMStateParams( CvImgObsInfo** obs_info_array,
+ int num_img, CvEHMM* hmm );
+
+/* Function computes transition probability matrices of embedded HMM
+ given observations segmentation */
+CVAPI(void) cvEstimateTransProb( CvImgObsInfo** obs_info_array,
+ int num_img, CvEHMM* hmm );
+
+/* Function computes probabilities of appearing observations at any state
+ (i.e. computes P(obs|state) for every pair(obs,state)) */
+CVAPI(void) cvEstimateObsProb( CvImgObsInfo* obs_info,
+ CvEHMM* hmm );
+
+/* Runs Viterbi algorithm for embedded HMM */
+CVAPI(float) cvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm );
+
+
+/* Function clusters observation vectors from several images
+ given observations segmentation.
+ Euclidean distance used for clustering vectors.
+ Centers of clusters are given means of every mixture */
+CVAPI(void) cvMixSegmL2( CvImgObsInfo** obs_info_array,
+ int num_img, CvEHMM* hmm );
+
+/****************************************************************************************\
+* A few functions from old stereo gesture recognition demos *
+\****************************************************************************************/
+
+/* Creates hand mask image given several points on the hand */
+CVAPI(void) cvCreateHandMask( CvSeq* hand_points,
+ IplImage *img_mask, CvRect *roi);
+
+/* Finds hand region in range image data */
+CVAPI(void) cvFindHandRegion (CvPoint3D32f* points, int count,
+ CvSeq* indexs,
+ float* line, CvSize2D32f size, int flag,
+ CvPoint3D32f* center,
+ CvMemStorage* storage, CvSeq **numbers);
+
+/* Finds hand region in range image data (advanced version) */
+CVAPI(void) cvFindHandRegionA( CvPoint3D32f* points, int count,
+ CvSeq* indexs,
+ float* line, CvSize2D32f size, int jc,
+ CvPoint3D32f* center,
+ CvMemStorage* storage, CvSeq **numbers);
+
+/* Calculates the coefficients of the homography matrix */
+CVAPI(void) cvCalcImageHomography( float* line, CvPoint3D32f* center,
+ float* intrinsic, float* homography );
+
+/****************************************************************************************\
+* More operations on sequences *
+\****************************************************************************************/
+
+/*****************************************************************************************/
+
+/* Read the int element at the reader's current / previous position. */
+#define CV_CURRENT_INT( reader ) (*((int *)(reader).ptr))
+#define CV_PREV_INT( reader ) (*((int *)(reader).prev_elem))
+
+/* Graph vertex layout extended with a float weight. */
+#define CV_GRAPH_WEIGHTED_VERTEX_FIELDS() CV_GRAPH_VERTEX_FIELDS()\
+ float weight;
+
+/* NOTE(review): unlike the vertex variant, this adds no weight field --
+   presumably intentional, but verify before relying on it. */
+#define CV_GRAPH_WEIGHTED_EDGE_FIELDS() CV_GRAPH_EDGE_FIELDS()
+
+typedef struct CvGraphWeightedVtx
+{
+ CV_GRAPH_WEIGHTED_VERTEX_FIELDS()
+} CvGraphWeightedVtx;
+
+typedef struct CvGraphWeightedEdge
+{
+ CV_GRAPH_WEIGHTED_EDGE_FIELDS()
+} CvGraphWeightedEdge;
+
+/* How a (sub)graph's weight is measured -- see the cvSubgraphWeight notes below. */
+typedef enum CvGraphWeightType
+{
+ CV_NOT_WEIGHTED,
+ CV_WEIGHTED_VTX,
+ CV_WEIGHTED_EDGE,
+ CV_WEIGHTED_ALL
+} CvGraphWeightType;
+
+
+/* Calculates histogram of a contour */
+CVAPI(void) cvCalcPGH( const CvSeq* contour, CvHistogram* hist );
+
+#define CV_DOMINANT_IPAN 1
+
+/* Finds high-curvature points of the contour */
+CVAPI(CvSeq*) cvFindDominantPoints( CvSeq* contour, CvMemStorage* storage,
+ int method CV_DEFAULT(CV_DOMINANT_IPAN),
+ double parameter1 CV_DEFAULT(0),
+ double parameter2 CV_DEFAULT(0),
+ double parameter3 CV_DEFAULT(0),
+ double parameter4 CV_DEFAULT(0));
+
+/*****************************************************************************************/
+
+
+/*******************************Stereo correspondence*************************************/
+
+/* Book-keeping state for the maximal-clique search whose API is commented
+   out below. */
+typedef struct CvCliqueFinder
+{
+ CvGraph* graph;
+ int** adj_matr;
+ int N; //graph size
+
+ // stacks, counters etc.
+ int k; //stack size
+ int* current_comp;
+ int** All;
+
+ int* ne;
+ int* ce;
+ int* fixp; //node with minimal disconnections
+ int* nod;
+ int* s; //for selected candidate
+ int status;
+ int best_score;
+ int weighted;
+ int weighted_edges;
+ float best_weight;
+ float* edge_weights;
+ float* vertex_weights;
+ float* cur_weight;
+ float* cand_weight;
+
+} CvCliqueFinder;
+
+#define CLIQUE_TIME_OFF 2
+#define CLIQUE_FOUND 1
+#define CLIQUE_END 0
+
+/* Clique API -- commented out upstream; kept for reference only. */
+/*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse,
+ int weighted CV_DEFAULT(0), int weighted_edges CV_DEFAULT(0));
+CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) );
+CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder );
+
+CVAPI(void) cvBronKerbosch( CvGraph* graph );*/
+
+
+/*F///////////////////////////////////////////////////////////////////////////////////////
+//
+// Name: cvSubgraphWeight
+// Purpose: finds weight of subgraph in a graph
+// Context:
+// Parameters:
+// graph - input graph.
+// subgraph - sequence of pairwise different ints. These are indices of vertices of subgraph.
+// weight_type - describes the way we measure weight.
+// one of the following:
+// CV_NOT_WEIGHTED - weight of a clique is simply its size
+// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
+// CV_WEIGHTED_EDGE - the same but edges
+// CV_WEIGHTED_ALL - the same but both edges and vertices
+// weight_vtx - optional vector of floats, with size = graph->total.
+// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL
+// weights of vertices must be provided. If weight_vtx not zero
+// these weights considered to be here, otherwise function assumes
+// that vertices of graph are inherited from CvGraphWeightedVtx.
+// weight_edge - optional matrix of floats, of width and height = graph->total.
+// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
+// weights of edges ought to be supplied. If weight_edge is not zero
+// function finds them here, otherwise function expects
+// edges of graph to be inherited from CvGraphWeightedEdge.
+// If this parameter is not zero structure of the graph is determined from matrix
+// rather than from CvGraphEdge's. In particular, elements corresponding to
+// absent edges should be zero.
+// Returns:
+// weight of subgraph.
+// Notes:
+//F*/
+/*CVAPI(float) cvSubgraphWeight( CvGraph *graph, CvSeq *subgraph,
+ CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
+ CvVect32f weight_vtx CV_DEFAULT(0),
+ CvMatr32f weight_edge CV_DEFAULT(0) );*/
+
+
+/*F///////////////////////////////////////////////////////////////////////////////////////
+//
+// Name: cvFindCliqueEx
+// Purpose: tries to find clique with maximum possible weight in a graph
+// Context:
+// Parameters:
+// graph - input graph.
+// storage - memory storage to be used by the result.
+// is_complementary - optional flag showing whether function should seek for clique
+// in complementary graph.
+// weight_type - describes our notion about weight.
+// one of the following:
+// CV_NOT_WEIGHTED - weight of a clique is simply its size
+// CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
+// CV_WEIGHTED_EDGE - the same but edges
+// CV_WEIGHTED_ALL - the same but both edges and vertices
+// weight_vtx - optional vector of floats, with size = graph->total.
+// If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL
+// weights of vertices must be provided. If weight_vtx not zero
+// these weights considered to be here, otherwise function assumes
+// that vertices of graph are inherited from CvGraphWeightedVtx.
+// weight_edge - optional matrix of floats, of width and height = graph->total.
+// If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
+// weights of edges ought to be supplied. If weight_edge is not zero
+// function finds them here, otherwise function expects
+// edges of graph to be inherited from CvGraphWeightedEdge.
+// Note that in case of CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
+// nonzero is_complementary implies nonzero weight_edge.
+// start_clique - optional sequence of pairwise different ints. They are indices of
+// vertices that shall be present in the output clique.
+// subgraph_of_ban - optional sequence of (maybe equal) ints. They are indices of
+// vertices that shall not be present in the output clique.
+// clique_weight_ptr - optional output parameter. Weight of found clique stored here.
+// num_generations - optional number of generations in evolutionary part of algorithm,
+// zero forces to return first found clique.
+// quality - optional parameter determining degree of required quality/speed tradeoff.
+// Must be in the range from 0 to 9.
+// 0 is fast and dirty, 9 is slow but hopefully yields good clique.
+// Returns:
+// sequence of pairwise different ints.
+// These are indices of vertices that form found clique.
+// Notes:
+// in cases of CV_WEIGHTED_EDGE and CV_WEIGHTED_ALL weights should be nonnegative.
+// start_clique has a priority over subgraph_of_ban.
+//F*/
+/*CVAPI(CvSeq*) cvFindCliqueEx( CvGraph *graph, CvMemStorage *storage,
+ int is_complementary CV_DEFAULT(0),
+ CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
+ CvVect32f weight_vtx CV_DEFAULT(0),
+ CvMatr32f weight_edge CV_DEFAULT(0),
+ CvSeq *start_clique CV_DEFAULT(0),
+ CvSeq *subgraph_of_ban CV_DEFAULT(0),
+ float *clique_weight_ptr CV_DEFAULT(0),
+ int num_generations CV_DEFAULT(3),
+ int quality CV_DEFAULT(2) );*/
+
+
+#define CV_UNDEF_SC_PARAM 12345 //default value of parameters
+
+/* Tuning constants for the Birchfield disparity algorithm (see
+   CV_DISPARITY_BIRCHFIELD below). */
+#define CV_IDP_BIRCHFIELD_PARAM1 25
+#define CV_IDP_BIRCHFIELD_PARAM2 5
+#define CV_IDP_BIRCHFIELD_PARAM3 12
+#define CV_IDP_BIRCHFIELD_PARAM4 15
+#define CV_IDP_BIRCHFIELD_PARAM5 25
+
+
+#define CV_DISPARITY_BIRCHFIELD 0
+
+
+/*F///////////////////////////////////////////////////////////////////////////
+//
+// Name: cvFindStereoCorrespondence
+// Purpose: find stereo correspondence on stereo-pair
+// Context:
+// Parameters:
+// leftImage - left image of stereo-pair (format 8uC1).
+// rightImage - right image of stereo-pair (format 8uC1).
+// mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
+// dispImage - destination disparity image
+// maxDisparity - maximal disparity
+// param1, param2, param3, param4, param5 - parameters of algorithm
+// Returns:
+// Notes:
+// Images must be rectified.
+// All images must have format 8uC1.
+//F*/
+CVAPI(void)
+cvFindStereoCorrespondence(
+ const CvArr* leftImage, const CvArr* rightImage,
+ int mode,
+ CvArr* dispImage,
+ int maxDisparity,
+ double param1 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ double param2 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ double param3 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ double param4 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ double param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) );
+
+/*****************************************************************************************/
+/************ Epiline functions *******************/
+
+
+
+/* Coefficients used to reconstruct a 3D point along an epiline;
+   consumed by icvCompute3DPoint / icvComCoeffForLine below. */
+typedef struct CvStereoLineCoeff
+{
+ double Xcoef;
+ double XcoefA;
+ double XcoefB;
+ double XcoefAB;
+
+ double Ycoef;
+ double YcoefA;
+ double YcoefB;
+ double YcoefAB;
+
+ double Zcoef;
+ double ZcoefA;
+ double ZcoefB;
+ double ZcoefAB;
+}CvStereoLineCoeff;
+
+
+/* Single-camera calibration data (legacy layout). */
+typedef struct CvCamera
+{
+ float imgSize[2]; /* size of the camera view, used during calibration */
+ float matrix[9]; /* intrinsic camera parameters: [ fx 0 cx; 0 fy cy; 0 0 1 ] */
+ float distortion[4]; /* distortion coefficients - two coefficients for radial distortion
+ and another two for tangential: [ k1 k2 p1 p2 ] */
+ float rotMatr[9];
+ float transVect[3]; /* rotation matrix and translation vector relatively
+ to some reference point in the space. */
+} CvCamera;
+
+/* Calibration data of a stereo rig (two cameras plus rectification info). */
+typedef struct CvStereoCamera
+{
+ CvCamera* camera[2]; /* two individual camera parameters */
+ float fundMatr[9]; /* fundamental matrix */
+
+ /* New part for stereo */
+ CvPoint3D32f epipole[2];
+ CvPoint2D32f quad[2][4]; /* coordinates of destination quadrangle after
+ epipolar geometry rectification */
+ double coeffs[2][3][3];/* coefficients for transformation */
+ CvPoint2D32f border[2][4];
+ CvSize warpSize;
+ CvStereoLineCoeff* lineCoeffs;
+ int needSwapCameras;/* flag set to 1 if need to swap cameras for good reconstruction */
+ float rotMatrix[9];
+ float transVector[3];
+} CvStereoCamera;
+
+
+/* Principal-axis decomposition of a contour (eigenvalues/eigenvectors). */
+typedef struct CvContourOrientation
+{
+ float egvals[2];
+ float egvects[4];
+
+ float max, min; // minimum and maximum projections
+ int imax, imin;
+} CvContourOrientation;
+
+/* Direction selector for icvConvertWarpCoordinates. */
+#define CV_CAMERA_TO_WARP 1
+#define CV_WARP_TO_CAMERA 2
+
+/* Internal (icv*) stereo/epipolar helpers exposed by the legacy module. */
+CVAPI(int) icvConvertWarpCoordinates(double coeffs[3][3],
+ CvPoint2D32f* cameraPoint,
+ CvPoint2D32f* warpPoint,
+ int direction);
+
+CVAPI(int) icvGetSymPoint3D( CvPoint3D64f pointCorner,
+ CvPoint3D64f point1,
+ CvPoint3D64f point2,
+ CvPoint3D64f *pointSym2);
+
+CVAPI(void) icvGetPieceLength3D(CvPoint3D64f point1,CvPoint3D64f point2,double* dist);
+
+/* Reconstructs a 3D point from epiline parameters (see CvStereoLineCoeff). */
+CVAPI(int) icvCompute3DPoint( double alpha,double betta,
+ CvStereoLineCoeff* coeffs,
+ CvPoint3D64f* point);
+
+CVAPI(int) icvCreateConvertMatrVect( double* rotMatr1,
+ double* transVect1,
+ double* rotMatr2,
+ double* transVect2,
+ double* convRotMatr,
+ double* convTransVect);
+
+CVAPI(int) icvConvertPointSystem(CvPoint3D64f M2,
+ CvPoint3D64f* M1,
+ double* rotMatr,
+ double* transVect
+ );
+
+CVAPI(int) icvComputeCoeffForStereo( CvStereoCamera* stereoCamera);
+
+CVAPI(int) icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross);
+CVAPI(int) icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross);
+CVAPI(float) icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point);
+CVAPI(int) icvStereoCalibration( int numImages,
+ int* nums,
+ CvSize imageSize,
+ CvPoint2D32f* imagePoints1,
+ CvPoint2D32f* imagePoints2,
+ CvPoint3D32f* objectPoints,
+ CvStereoCamera* stereoparams
+ );
+
+
+CVAPI(int) icvComputeRestStereoParams(CvStereoCamera *stereoparams);
+
+CVAPI(void) cvComputePerspectiveMap( const double coeffs[3][3], CvArr* rectMapX, CvArr* rectMapY );
+
+CVAPI(int) icvComCoeffForLine( CvPoint2D64f point1,
+ CvPoint2D64f point2,
+ CvPoint2D64f point3,
+ CvPoint2D64f point4,
+ double* camMatr1,
+ double* rotMatr1,
+ double* transVect1,
+ double* camMatr2,
+ double* rotMatr2,
+ double* transVect2,
+ CvStereoLineCoeff* coeffs,
+ int* needSwapCameras);
+
+CVAPI(int) icvGetDirectionForPoint( CvPoint2D64f point,
+ double* camMatr,
+ CvPoint3D64f* direct);
+
+CVAPI(int) icvGetCrossLines(CvPoint3D64f point11,CvPoint3D64f point12,
+ CvPoint3D64f point21,CvPoint3D64f point22,
+ CvPoint3D64f* midPoint);
+
+CVAPI(int) icvComputeStereoLineCoeffs( CvPoint3D64f pointA,
+ CvPoint3D64f pointB,
+ CvPoint3D64f pointCam1,
+ double gamma,
+ CvStereoLineCoeff* coeffs);
+
+/*CVAPI(int) icvComputeFundMatrEpipoles ( double* camMatr1,
+ double* rotMatr1,
+ double* transVect1,
+ double* camMatr2,
+ double* rotMatr2,
+ double* transVect2,
+ CvPoint2D64f* epipole1,
+ CvPoint2D64f* epipole2,
+ double* fundMatr);*/
+
+CVAPI(int) icvGetAngleLine( CvPoint2D64f startPoint, CvSize imageSize,CvPoint2D64f *point1,CvPoint2D64f *point2);
+
+/* Computes line coefficients a,b,c for the segment; `result` presumably
+   reports success/degeneracy -- verify against implementation. */
+CVAPI(void) icvGetCoefForPiece( CvPoint2D64f p_start,CvPoint2D64f p_end,
+ double *a,double *b,double *c,
+ int* result);
+
+/*CVAPI(void) icvGetCommonArea( CvSize imageSize,
+ CvPoint2D64f epipole1,CvPoint2D64f epipole2,
+ double* fundMatr,
+ double* coeff11,double* coeff12,
+ double* coeff21,double* coeff22,
+ int* result);*/
+
+CVAPI(void) icvComputeeInfiniteProject1(double* rotMatr,
+ double* camMatr1,
+ double* camMatr2,
+ CvPoint2D32f point1,
+ CvPoint2D32f *point2);
+
+CVAPI(void) icvComputeeInfiniteProject2(double* rotMatr,
+ double* camMatr1,
+ double* camMatr2,
+ CvPoint2D32f* point1,
+ CvPoint2D32f point2);
+
+/* Line/segment intersection helpers; `result` presumably flags whether a
+   crossing exists. */
+CVAPI(void) icvGetCrossDirectDirect( double* direct1,double* direct2,
+ CvPoint2D64f *cross,int* result);
+
+CVAPI(void) icvGetCrossPieceDirect( CvPoint2D64f p_start,CvPoint2D64f p_end,
+ double a,double b,double c,
+ CvPoint2D64f *cross,int* result);
+
+CVAPI(void) icvGetCrossPiecePiece( CvPoint2D64f p1_start,CvPoint2D64f p1_end,
+ CvPoint2D64f p2_start,CvPoint2D64f p2_end,
+ CvPoint2D64f* cross,
+ int* result);
+
+CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist);
+
+CVAPI(void) icvGetCrossRectDirect( CvSize imageSize,
+ double a,double b,double c,
+ CvPoint2D64f *start,CvPoint2D64f *end,
+ int* result);
+
+CVAPI(void) icvProjectPointToImage( CvPoint3D64f point,
+ double* camMatr,double* rotMatr,double* transVect,
+ CvPoint2D64f* projPoint);
+
+/* Computes the rectifying quadrangles for both views of a stereo pair. */
+CVAPI(void) icvGetQuadsTransform( CvSize imageSize,
+ double* camMatr1,
+ double* rotMatr1,
+ double* transVect1,
+ double* camMatr2,
+ double* rotMatr2,
+ double* transVect2,
+ CvSize* warpSize,
+ double quad1[4][2],
+ double quad2[4][2],
+ double* fundMatr,
+ CvPoint3D64f* epipole1,
+ CvPoint3D64f* epipole2
+ );
+
+CVAPI(void) icvGetQuadsTransformStruct( CvStereoCamera* stereoCamera);
+
+CVAPI(void) icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera);
+
+CVAPI(void) icvGetCutPiece( double* areaLineCoef1,double* areaLineCoef2,
+ CvPoint2D64f epipole,
+ CvSize imageSize,
+ CvPoint2D64f* point11,CvPoint2D64f* point12,
+ CvPoint2D64f* point21,CvPoint2D64f* point22,
+ int* result);
+
+CVAPI(void) icvGetMiddleAnglePoint( CvPoint2D64f basePoint,
+ CvPoint2D64f point1,CvPoint2D64f point2,
+ CvPoint2D64f* midPoint);
+
+CVAPI(void) icvGetNormalDirect(double* direct,CvPoint2D64f point,double* normDirect);
+
+CVAPI(double) icvGetVect(CvPoint2D64f basePoint,CvPoint2D64f point1,CvPoint2D64f point2);
+
+CVAPI(void) icvProjectPointToDirect( CvPoint2D64f point,double* lineCoeff,
+ CvPoint2D64f* projectPoint);
+
+CVAPI(void) icvGetDistanceFromPointToDirect( CvPoint2D64f point,double* lineCoef,double*dist);
+
+CVAPI(IplImage*) icvCreateIsometricImage( IplImage* src, IplImage* dst,
+ int desired_depth, int desired_num_channels );
+
+CVAPI(void) cvDeInterlace( const CvArr* frame, CvArr* fieldEven, CvArr* fieldOdd );
+
+/*CVAPI(int) icvSelectBestRt( int numImages,
+ int* numPoints,
+ CvSize imageSize,
+ CvPoint2D32f* imagePoints1,
+ CvPoint2D32f* imagePoints2,
+ CvPoint3D32f* objectPoints,
+
+ CvMatr32f cameraMatrix1,
+ CvVect32f distortion1,
+ CvMatr32f rotMatrs1,
+ CvVect32f transVects1,
+
+ CvMatr32f cameraMatrix2,
+ CvVect32f distortion2,
+ CvMatr32f rotMatrs2,
+ CvVect32f transVects2,
+
+ CvMatr32f bestRotMatr,
+ CvVect32f bestTransVect
+ );*/
+
+
+/****************************************************************************************\
+* Contour Tree *
+\****************************************************************************************/
+
+/* Contour tree header */
+typedef struct CvContourTree
+{
+ CV_SEQUENCE_FIELDS()
+ CvPoint p1; /* the first point of the binary tree root segment */
+ CvPoint p2; /* the last point of the binary tree root segment */
+} CvContourTree;
+
+/* Builds hierarhical representation of a contour */
+CVAPI(CvContourTree*) cvCreateContourTree( const CvSeq* contour,
+ CvMemStorage* storage,
+ double threshold );
+
+/* Reconstruct (completelly or partially) contour a from contour tree */
+CVAPI(CvSeq*) cvContourFromContourTree( const CvContourTree* tree,
+ CvMemStorage* storage,
+ CvTermCriteria criteria );
+
+/* Compares two contour trees */
+enum { CV_CONTOUR_TREES_MATCH_I1 = 1 };
+
+CVAPI(double) cvMatchContourTrees( const CvContourTree* tree1,
+ const CvContourTree* tree2,
+ int method, double threshold );
+
+/****************************************************************************************\
+* Contour Morphing *
+\****************************************************************************************/
+
+/* finds correspondence between two contours */
+CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1,
+ const CvSeq* contour2,
+ CvMemStorage* storage);
+
+/* morphs contours using the pre-calculated correspondence:
+ alpha=0 ~ contour1, alpha=1 ~ contour2 */
+CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2,
+ CvSeq* corr, double alpha,
+ CvMemStorage* storage );
+
+
+/****************************************************************************************\
+* Active Contours *
+\****************************************************************************************/
+
+#define CV_VALUE 1
+#define CV_ARRAY 2
+/* Updates active contour in order to minimize its cummulative
+ (internal and external) energy. */
+CVAPI(void) cvSnakeImage( const IplImage* image, CvPoint* points,
+ int length, float* alpha,
+ float* beta, float* gamma,
+ int coeff_usage, CvSize win,
+ CvTermCriteria criteria, int calc_gradient CV_DEFAULT(1));
+
+/****************************************************************************************\
+* Texture Descriptors *
+\****************************************************************************************/
+
+#define CV_GLCM_OPTIMIZATION_NONE -2
+#define CV_GLCM_OPTIMIZATION_LUT -1
+#define CV_GLCM_OPTIMIZATION_HISTOGRAM 0
+
+#define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST 10
+#define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST 11
+#define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM 4
+
+#define CV_GLCMDESC_ENTROPY 0
+#define CV_GLCMDESC_ENERGY 1
+#define CV_GLCMDESC_HOMOGENITY 2
+#define CV_GLCMDESC_CONTRAST 3
+#define CV_GLCMDESC_CLUSTERTENDENCY 4
+#define CV_GLCMDESC_CLUSTERSHADE 5
+#define CV_GLCMDESC_CORRELATION 6
+#define CV_GLCMDESC_CORRELATIONINFO1 7
+#define CV_GLCMDESC_CORRELATIONINFO2 8
+#define CV_GLCMDESC_MAXIMUMPROBABILITY 9
+
+#define CV_GLCM_ALL 0
+#define CV_GLCM_GLCM 1
+#define CV_GLCM_DESC 2
+
+typedef struct CvGLCM CvGLCM;
+
+CVAPI(CvGLCM*) cvCreateGLCM( const IplImage* srcImage,
+ int stepMagnitude,
+ const int* stepDirections CV_DEFAULT(0),
+ int numStepDirections CV_DEFAULT(0),
+ int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE));
+
+CVAPI(void) cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL));
+
+CVAPI(void) cvCreateGLCMDescriptors( CvGLCM* destGLCM,
+ int descriptorOptimizationType
+ CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST));
+
+CVAPI(double) cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor );
+
+CVAPI(void) cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor,
+ double* average, double* standardDeviation );
+
+CVAPI(IplImage*) cvCreateGLCMImage( CvGLCM* GLCM, int step );
+
+/****************************************************************************************\
+* Face eyes&mouth tracking *
+\****************************************************************************************/
+
+
+typedef struct CvFaceTracker CvFaceTracker;
+
+#define CV_NUM_FACE_ELEMENTS 3
+enum CV_FACE_ELEMENTS
+{
+ CV_FACE_MOUTH = 0,
+ CV_FACE_LEFT_EYE = 1,
+ CV_FACE_RIGHT_EYE = 2
+};
+
+CVAPI(CvFaceTracker*) cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray,
+ CvRect* pRects, int nRects);
+CVAPI(int) cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray,
+ CvRect* pRects, int nRects,
+ CvPoint* ptRotate, double* dbAngleRotate);
+CVAPI(void) cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker);
+
+
+typedef struct CvFace
+{
+ CvRect MouthRect;
+ CvRect LeftEyeRect;
+ CvRect RightEyeRect;
+} CvFaceData;
+
+CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage);
+CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage);
+
+
+/****************************************************************************************\
+* 3D Tracker *
+\****************************************************************************************/
+
+typedef unsigned char CvBool;
+
+typedef struct Cv3dTracker2dTrackedObject
+{
+ int id;
+ CvPoint2D32f p; // pgruebele: So we do not loose precision, this needs to be float
+} Cv3dTracker2dTrackedObject;
+
+CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2D32f p)
+{
+ Cv3dTracker2dTrackedObject r;
+ r.id = id;
+ r.p = p;
+ return r;
+}
+
+typedef struct Cv3dTrackerTrackedObject
+{
+ int id;
+ CvPoint3D32f p; // location of the tracked object
+} Cv3dTrackerTrackedObject;
+
+CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p)
+{
+ Cv3dTrackerTrackedObject r;
+ r.id = id;
+ r.p = p;
+ return r;
+}
+
+typedef struct Cv3dTrackerCameraInfo
+{
+ CvBool valid;
+ float mat[4][4]; /* maps camera coordinates to world coordinates */
+ CvPoint2D32f principal_point; /* copied from intrinsics so this structure */
+ /* has all the info we need */
+} Cv3dTrackerCameraInfo;
+
+typedef struct Cv3dTrackerCameraIntrinsics
+{
+ CvPoint2D32f principal_point;
+ float focal_length[2];
+ float distortion[4];
+} Cv3dTrackerCameraIntrinsics;
+
+CVAPI(CvBool) cv3dTrackerCalibrateCameras(int num_cameras,
+ const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */
+ CvSize etalon_size,
+ float square_size,
+ IplImage *samples[], /* size is num_cameras */
+ Cv3dTrackerCameraInfo camera_info[]); /* size is num_cameras */
+
+CVAPI(int) cv3dTrackerLocateObjects(int num_cameras, int num_objects,
+ const Cv3dTrackerCameraInfo camera_info[], /* size is num_cameras */
+ const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */
+ Cv3dTrackerTrackedObject tracked_objects[]); /* size is num_objects */
+/****************************************************************************************
+ tracking_info is a rectangular array; one row per camera, num_objects elements per row.
+ The id field of any unused slots must be -1. Ids need not be ordered or consecutive. On
+ completion, the return value is the number of objects located; i.e., the number of objects
+ visible by more than one camera. The id field of any unused slots in tracked objects is
+ set to -1.
+****************************************************************************************/
+
+
+/****************************************************************************************\
+* Skeletons and Linear-Contour Models *
+\****************************************************************************************/
+
+typedef enum CvLeeParameters
+{
+ CV_LEE_INT = 0,
+ CV_LEE_FLOAT = 1,
+ CV_LEE_DOUBLE = 2,
+ CV_LEE_AUTO = -1,
+ CV_LEE_ERODE = 0,
+ CV_LEE_ZOOM = 1,
+ CV_LEE_NON = 2
+} CvLeeParameters;
+
+#define CV_NEXT_VORONOISITE2D( SITE ) ((SITE)->edge[0]->site[((SITE)->edge[0]->site[0] == (SITE))])
+#define CV_PREV_VORONOISITE2D( SITE ) ((SITE)->edge[1]->site[((SITE)->edge[1]->site[0] == (SITE))])
+#define CV_FIRST_VORONOIEDGE2D( SITE ) ((SITE)->edge[0])
+#define CV_LAST_VORONOIEDGE2D( SITE ) ((SITE)->edge[1])
+#define CV_NEXT_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[(EDGE)->site[0] != (SITE)])
+#define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))])
+#define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))])
+#define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))])
+#define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))])
+
+#define CV_VORONOISITE2D_FIELDS() \
+ struct CvVoronoiNode2D *node[2]; \
+ struct CvVoronoiEdge2D *edge[2];
+
+typedef struct CvVoronoiSite2D
+{
+ CV_VORONOISITE2D_FIELDS()
+ struct CvVoronoiSite2D *next[2];
+} CvVoronoiSite2D;
+
+#define CV_VORONOIEDGE2D_FIELDS() \
+ struct CvVoronoiNode2D *node[2]; \
+ struct CvVoronoiSite2D *site[2]; \
+ struct CvVoronoiEdge2D *next[4];
+
+typedef struct CvVoronoiEdge2D
+{
+ CV_VORONOIEDGE2D_FIELDS()
+} CvVoronoiEdge2D;
+
+#define CV_VORONOINODE2D_FIELDS() \
+ CV_SET_ELEM_FIELDS(CvVoronoiNode2D) \
+ CvPoint2D32f pt; \
+ float radius;
+
+typedef struct CvVoronoiNode2D
+{
+ CV_VORONOINODE2D_FIELDS()
+} CvVoronoiNode2D;
+
+#define CV_VORONOIDIAGRAM2D_FIELDS() \
+ CV_GRAPH_FIELDS() \
+ CvSet *sites;
+
+typedef struct CvVoronoiDiagram2D
+{
+ CV_VORONOIDIAGRAM2D_FIELDS()
+} CvVoronoiDiagram2D;
+
+/* Computes Voronoi Diagram for given polygons with holes */
+CVAPI(int) cvVoronoiDiagramFromContour(CvSeq* ContourSeq,
+ CvVoronoiDiagram2D** VoronoiDiagram,
+ CvMemStorage* VoronoiStorage,
+ CvLeeParameters contour_type CV_DEFAULT(CV_LEE_INT),
+ int contour_orientation CV_DEFAULT(-1),
+ int attempt_number CV_DEFAULT(10));
+
+/* Computes Voronoi Diagram for domains in given image */
+CVAPI(int) cvVoronoiDiagramFromImage(IplImage* pImage,
+ CvSeq** ContourSeq,
+ CvVoronoiDiagram2D** VoronoiDiagram,
+ CvMemStorage* VoronoiStorage,
+ CvLeeParameters regularization_method CV_DEFAULT(CV_LEE_NON),
+ float approx_precision CV_DEFAULT(CV_LEE_AUTO));
+
+/* Deallocates the storage */
+CVAPI(void) cvReleaseVoronoiStorage(CvVoronoiDiagram2D* VoronoiDiagram,
+ CvMemStorage** pVoronoiStorage);
+
+/*********************** Linear-Contour Model ****************************/
+
+struct CvLCMEdge;
+struct CvLCMNode;
+
+typedef struct CvLCMEdge
+{
+ CV_GRAPH_EDGE_FIELDS()
+ CvSeq* chain;
+ float width;
+ int index1;
+ int index2;
+} CvLCMEdge;
+
+typedef struct CvLCMNode
+{
+ CV_GRAPH_VERTEX_FIELDS()
+ CvContour* contour;
+} CvLCMNode;
+
+
+/* Computes hybrid model from Voronoi Diagram */
+CVAPI(CvGraph*) cvLinearContorModelFromVoronoiDiagram(CvVoronoiDiagram2D* VoronoiDiagram,
+ float maxWidth);
+
+/* Releases hybrid model storage */
+CVAPI(int) cvReleaseLinearContorModelStorage(CvGraph** Graph);
+
+
+/* two stereo-related functions */
+
+CVAPI(void) cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f vertex[4], double matrix[3][3],
+ CvArr* rectMap );
+
+/*CVAPI(void) cvInitStereoRectification( CvStereoCamera* params,
+ CvArr* rectMap1, CvArr* rectMap2,
+ int do_undistortion );*/
+
+/*************************** View Morphing Functions ************************/
+
+typedef struct CvMatrix3
+{
+ float m[3][3];
+} CvMatrix3;
+
+/* The order of the function corresponds to the order they should appear in
+ the view morphing pipeline */
+
+/* Finds ending points of scanlines on left and right images of stereo-pair */
+CVAPI(void) cvMakeScanlines( const CvMatrix3* matrix, CvSize img_size,
+ int* scanlines1, int* scanlines2,
+ int* lengths1, int* lengths2,
+ int* line_count );
+
+/* Grab pixel values from scanlines and stores them sequentially
+ (some sort of perspective image transform) */
+CVAPI(void) cvPreWarpImage( int line_count,
+ IplImage* img,
+ uchar* dst,
+ int* dst_nums,
+ int* scanlines);
+
+/* Approximate each grabbed scanline by a sequence of runs
+ (lossy run-length compression) */
+CVAPI(void) cvFindRuns( int line_count,
+ uchar* prewarp1,
+ uchar* prewarp2,
+ int* line_lengths1,
+ int* line_lengths2,
+ int* runs1,
+ int* runs2,
+ int* num_runs1,
+ int* num_runs2);
+
+/* Compares two sets of compressed scanlines */
+CVAPI(void) cvDynamicCorrespondMulti( int line_count,
+ int* first,
+ int* first_runs,
+ int* second,
+ int* second_runs,
+ int* first_corr,
+ int* second_corr);
+
+/* Finds scanline ending coordinates for some intermediate "virtual" camera position */
+CVAPI(void) cvMakeAlphaScanlines( int* scanlines1,
+ int* scanlines2,
+ int* scanlinesA,
+ int* lengths,
+ int line_count,
+ float alpha);
+
+/* Blends data of the left and right image scanlines to get
+ pixel values of "virtual" image scanlines */
+CVAPI(void) cvMorphEpilinesMulti( int line_count,
+ uchar* first_pix,
+ int* first_num,
+ uchar* second_pix,
+ int* second_num,
+ uchar* dst_pix,
+ int* dst_num,
+ float alpha,
+ int* first,
+ int* first_runs,
+ int* second,
+ int* second_runs,
+ int* first_corr,
+ int* second_corr);
+
+/* Does reverse warping of the morphing result to make
+ it fill the destination image rectangle */
+CVAPI(void) cvPostWarpImage( int line_count,
+ uchar* src,
+ int* src_nums,
+ IplImage* img,
+ int* scanlines);
+
+/* Deletes Moire (missed pixels that appear due to discretization) */
+CVAPI(void) cvDeleteMoire( IplImage* img );
+
+
+typedef struct CvConDensation
+{
+ int MP;
+ int DP;
+ float* DynamMatr; /* Matrix of the linear Dynamics system */
+ float* State; /* Vector of State */
+ int SamplesNum; /* Number of the Samples */
+ float** flSamples; /* arr of the Sample Vectors */
+ float** flNewSamples; /* temporary array of the Sample Vectors */
+ float* flConfidence; /* Confidence for each Sample */
+ float* flCumulative; /* Cumulative confidence */
+ float* Temp; /* Temporary vector */
+ float* RandomSample; /* RandomVector to update sample set */
+ struct CvRandState* RandS; /* Array of structures to generate random vectors */
+} CvConDensation;
+
+/* Creates ConDensation filter state */
+CVAPI(CvConDensation*) cvCreateConDensation( int dynam_params,
+ int measure_params,
+ int sample_count );
+
+/* Releases ConDensation filter state */
+CVAPI(void) cvReleaseConDensation( CvConDensation** condens );
+
+/* Updates ConDensation filter by time (predict future state of the system) */
+CVAPI(void) cvConDensUpdateByTime( CvConDensation* condens);
+
+/* Initializes ConDensation filter samples */
+CVAPI(void) cvConDensInitSampleSet( CvConDensation* condens, CvMat* lower_bound, CvMat* upper_bound );
+
+CV_INLINE int iplWidth( const IplImage* img )
+{
+ return !img ? 0 : !img->roi ? img->width : img->roi->width;
+}
+
+CV_INLINE int iplHeight( const IplImage* img )
+{
+ return !img ? 0 : !img->roi ? img->height : img->roi->height;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+
+/****************************************************************************************\
+* Calibration engine *
+\****************************************************************************************/
+
+typedef enum CvCalibEtalonType
+{
+ CV_CALIB_ETALON_USER = -1,
+ CV_CALIB_ETALON_CHESSBOARD = 0,
+ CV_CALIB_ETALON_CHECKERBOARD = CV_CALIB_ETALON_CHESSBOARD
+}
+CvCalibEtalonType;
+
+class CV_EXPORTS CvCalibFilter
+{
+public:
+ /* Constructor & destructor */
+ CvCalibFilter();
+ virtual ~CvCalibFilter();
+
+ /* Sets etalon type - one for all cameras.
+ etalonParams is used in case of pre-defined etalons (such as chessboard).
+ Number of elements in etalonParams is determined by etalonType.
+ E.g., if etalon type is CV_ETALON_TYPE_CHESSBOARD then:
+ etalonParams[0] is number of squares per one side of etalon
+ etalonParams[1] is number of squares per another side of etalon
+ etalonParams[2] is linear size of squares in the board in arbitrary units.
+ pointCount & points are used in case of
+ CV_CALIB_ETALON_USER (user-defined) etalon. */
+ virtual bool
+ SetEtalon( CvCalibEtalonType etalonType, double* etalonParams,
+ int pointCount = 0, CvPoint2D32f* points = 0 );
+
+ /* Retrieves etalon parameters/or and points */
+ virtual CvCalibEtalonType
+ GetEtalon( int* paramCount = 0, const double** etalonParams = 0,
+ int* pointCount = 0, const CvPoint2D32f** etalonPoints = 0 ) const;
+
+ /* Sets number of cameras calibrated simultaneously. It is equal to 1 initially */
+ virtual void SetCameraCount( int cameraCount );
+
+ /* Retrieves number of cameras */
+ int GetCameraCount() const { return cameraCount; }
+
+ /* Starts cameras calibration */
+ virtual bool SetFrames( int totalFrames );
+
+ /* Stops cameras calibration */
+ virtual void Stop( bool calibrate = false );
+
+ /* Retrieves number of cameras */
+ bool IsCalibrated() const { return isCalibrated; }
+
+ /* Feeds another serie of snapshots (one per each camera) to filter.
+ Etalon points on these images are found automatically.
+ If the function can't locate points, it returns false */
+ virtual bool FindEtalon( IplImage** imgs );
+
+ /* The same but takes matrices */
+ virtual bool FindEtalon( CvMat** imgs );
+
+ /* Lower-level function for feeding filter with already found etalon points.
+ Array of point arrays for each camera is passed. */
+ virtual bool Push( const CvPoint2D32f** points = 0 );
+
+ /* Returns total number of accepted frames and, optionally,
+ total number of frames to collect */
+ virtual int GetFrameCount( int* framesTotal = 0 ) const;
+
+ /* Retrieves camera parameters for specified camera.
+ If camera is not calibrated the function returns 0 */
+ virtual const CvCamera* GetCameraParams( int idx = 0 ) const;
+
+ virtual const CvStereoCamera* GetStereoParams() const;
+
+ /* Sets camera parameters for all cameras */
+ virtual bool SetCameraParams( CvCamera* params );
+
+ /* Saves all camera parameters to file */
+ virtual bool SaveCameraParams( const char* filename );
+
+ /* Loads all camera parameters from file */
+ virtual bool LoadCameraParams( const char* filename );
+
+ /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
+ virtual bool Undistort( IplImage** src, IplImage** dst );
+
+ /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
+ virtual bool Undistort( CvMat** src, CvMat** dst );
+
+ /* Returns array of etalon points detected/partally detected
+ on the latest frame for idx-th camera */
+ virtual bool GetLatestPoints( int idx, CvPoint2D32f** pts,
+ int* count, bool* found );
+
+ /* Draw the latest detected/partially detected etalon */
+ virtual void DrawPoints( IplImage** dst );
+
+ /* Draw the latest detected/partially detected etalon */
+ virtual void DrawPoints( CvMat** dst );
+
+ virtual bool Rectify( IplImage** srcarr, IplImage** dstarr );
+ virtual bool Rectify( CvMat** srcarr, CvMat** dstarr );
+
+protected:
+
+ enum { MAX_CAMERAS = 3 };
+
+ /* etalon data */
+ CvCalibEtalonType etalonType;
+ int etalonParamCount;
+ double* etalonParams;
+ int etalonPointCount;
+ CvPoint2D32f* etalonPoints;
+ CvSize imgSize;
+ CvMat* grayImg;
+ CvMat* tempImg;
+ CvMemStorage* storage;
+
+ /* camera data */
+ int cameraCount;
+ CvCamera cameraParams[MAX_CAMERAS];
+ CvStereoCamera stereo;
+ CvPoint2D32f* points[MAX_CAMERAS];
+ CvMat* undistMap[MAX_CAMERAS][2];
+ CvMat* undistImg;
+ int latestCounts[MAX_CAMERAS];
+ CvPoint2D32f* latestPoints[MAX_CAMERAS];
+ CvMat* rectMap[MAX_CAMERAS][2];
+
+ /* Added by Valery */
+ //CvStereoCamera stereoParams;
+
+ int maxPoints;
+ int framesTotal;
+ int framesAccepted;
+ bool isCalibrated;
+};
+
+#include <iosfwd>
+#include <limits>
+
+class CV_EXPORTS CvImage
+{
+public:
+ CvImage() : image(0), refcount(0) {}
+ CvImage( CvSize _size, int _depth, int _channels )
+ {
+ image = cvCreateImage( _size, _depth, _channels );
+ refcount = image ? new int(1) : 0;
+ }
+
+ CvImage( IplImage* img ) : image(img)
+ {
+ refcount = image ? new int(1) : 0;
+ }
+
+ CvImage( const CvImage& img ) : image(img.image), refcount(img.refcount)
+ {
+ if( refcount ) ++(*refcount);
+ }
+
+ CvImage( const char* filename, const char* imgname=0, int color=-1 ) : image(0), refcount(0)
+ { load( filename, imgname, color ); }
+
+ CvImage( CvFileStorage* fs, const char* mapname, const char* imgname ) : image(0), refcount(0)
+ { read( fs, mapname, imgname ); }
+
+ CvImage( CvFileStorage* fs, const char* seqname, int idx ) : image(0), refcount(0)
+ { read( fs, seqname, idx ); }
+
+ ~CvImage()
+ {
+ if( refcount && !(--*refcount) )
+ {
+ cvReleaseImage( &image );
+ delete refcount;
+ }
+ }
+
+ CvImage clone() { return CvImage(image ? cvCloneImage(image) : 0); }
+
+ void create( CvSize _size, int _depth, int _channels )
+ {
+ if( !image || !refcount ||
+ image->width != _size.width || image->height != _size.height ||
+ image->depth != _depth || image->nChannels != _channels )
+ attach( cvCreateImage( _size, _depth, _channels ));
+ }
+
+ void release() { detach(); }
+ void clear() { detach(); }
+
+ void attach( IplImage* img, bool use_refcount=true )
+ {
+ if( refcount && --*refcount == 0 )
+ {
+ cvReleaseImage( &image );
+ delete refcount;
+ }
+ image = img;
+ refcount = use_refcount && image ? new int(1) : 0;
+ }
+
+ void detach()
+ {
+ if( refcount && --*refcount == 0 )
+ {
+ cvReleaseImage( &image );
+ delete refcount;
+ }
+ image = 0;
+ refcount = 0;
+ }
+
+ bool load( const char* filename, const char* imgname=0, int color=-1 );
+ bool read( CvFileStorage* fs, const char* mapname, const char* imgname );
+ bool read( CvFileStorage* fs, const char* seqname, int idx );
+ void save( const char* filename, const char* imgname, const int* params=0 );
+ void write( CvFileStorage* fs, const char* imgname );
+
+ void show( const char* window_name );
+ bool is_valid() { return image != 0; }
+
+ int width() const { return image ? image->width : 0; }
+ int height() const { return image ? image->height : 0; }
+
+ CvSize size() const { return image ? cvSize(image->width, image->height) : cvSize(0,0); }
+
+ CvSize roi_size() const
+ {
+ return !image ? cvSize(0,0) :
+ !image->roi ? cvSize(image->width,image->height) :
+ cvSize(image->roi->width, image->roi->height);
+ }
+
+ CvRect roi() const
+ {
+ return !image ? cvRect(0,0,0,0) :
+ !image->roi ? cvRect(0,0,image->width,image->height) :
+ cvRect(image->roi->xOffset,image->roi->yOffset,
+ image->roi->width,image->roi->height);
+ }
+
+ int coi() const { return !image || !image->roi ? 0 : image->roi->coi; }
+
+ void set_roi(CvRect _roi) { cvSetImageROI(image,_roi); }
+ void reset_roi() { cvResetImageROI(image); }
+ void set_coi(int _coi) { cvSetImageCOI(image,_coi); }
+ int depth() const { return image ? image->depth : 0; }
+ int channels() const { return image ? image->nChannels : 0; }
+ int pix_size() const { return image ? ((image->depth & 255)>>3)*image->nChannels : 0; }
+
+ uchar* data() { return image ? (uchar*)image->imageData : 0; }
+ const uchar* data() const { return image ? (const uchar*)image->imageData : 0; }
+ int step() const { return image ? image->widthStep : 0; }
+ int origin() const { return image ? image->origin : 0; }
+
+ uchar* roi_row(int y)
+ {
+ assert(0<=y);
+ assert(!image ?
+ 1 : image->roi ?
+ y<image->roi->height : y<image->height);
+
+ return !image ? 0 :
+ !image->roi ?
+ (uchar*)(image->imageData + y*image->widthStep) :
+ (uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep +
+ image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels);
+ }
+
+ const uchar* roi_row(int y) const
+ {
+ assert(0<=y);
+ assert(!image ?
+ 1 : image->roi ?
+ y<image->roi->height : y<image->height);
+
+ return !image ? 0 :
+ !image->roi ?
+ (const uchar*)(image->imageData + y*image->widthStep) :
+ (const uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep +
+ image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels);
+ }
+
+ operator const IplImage* () const { return image; }
+ operator IplImage* () { return image; }
+
+ CvImage& operator = (const CvImage& img)
+ {
+ if( img.refcount )
+ ++*img.refcount;
+ if( refcount && !(--*refcount) )
+ cvReleaseImage( &image );
+ image=img.image;
+ refcount=img.refcount;
+ return *this;
+ }
+
+protected:
+ IplImage* image;
+ int* refcount;
+};
+
+
+class CV_EXPORTS CvMatrix
+{
+public:
+ CvMatrix() : matrix(0) {}
+ CvMatrix( int _rows, int _cols, int _type )
+ { matrix = cvCreateMat( _rows, _cols, _type ); }
+
+ CvMatrix( int _rows, int _cols, int _type, CvMat* hdr,
+ void* _data=0, int _step=CV_AUTOSTEP )
+ { matrix = cvInitMatHeader( hdr, _rows, _cols, _type, _data, _step ); }
+
+ CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data=true );
+
+ CvMatrix( int _rows, int _cols, int _type, void* _data, int _step=CV_AUTOSTEP )
+ { matrix = cvCreateMatHeader( _rows, _cols, _type );
+ cvSetData( matrix, _data, _step ); }
+
+ CvMatrix( CvMat* m )
+ { matrix = m; }
+
+ CvMatrix( const CvMatrix& m )
+ {
+ matrix = m.matrix;
+ addref();
+ }
+
+ CvMatrix( const char* filename, const char* matname=0, int color=-1 ) : matrix(0)
+ { load( filename, matname, color ); }
+
+ CvMatrix( CvFileStorage* fs, const char* mapname, const char* matname ) : matrix(0)
+ { read( fs, mapname, matname ); }
+
+ CvMatrix( CvFileStorage* fs, const char* seqname, int idx ) : matrix(0)
+ { read( fs, seqname, idx ); }
+
+ ~CvMatrix()
+ {
+ release();
+ }
+
+ CvMatrix clone() { return CvMatrix(matrix ? cvCloneMat(matrix) : 0); }
+
+ void set( CvMat* m, bool add_ref )
+ {
+ release();
+ matrix = m;
+ if( add_ref )
+ addref();
+ }
+
+ void create( int _rows, int _cols, int _type )
+ {
+ if( !matrix || !matrix->refcount ||
+ matrix->rows != _rows || matrix->cols != _cols ||
+ CV_MAT_TYPE(matrix->type) != _type )
+ set( cvCreateMat( _rows, _cols, _type ), false );
+ }
+
+ void addref() const
+ {
+ if( matrix )
+ {
+ if( matrix->hdr_refcount )
+ ++matrix->hdr_refcount;
+ else if( matrix->refcount )
+ ++*matrix->refcount;
+ }
+ }
+
+ void release()
+ {
+ if( matrix )
+ {
+ if( matrix->hdr_refcount )
+ {
+ if( --matrix->hdr_refcount == 0 )
+ cvReleaseMat( &matrix );
+ }
+ else if( matrix->refcount )
+ {
+ if( --*matrix->refcount == 0 )
+ cvFree( &matrix->refcount );
+ }
+ matrix = 0;
+ }
+ }
+
+ void clear()
+ {
+ release();
+ }
+
+ bool load( const char* filename, const char* matname=0, int color=-1 );
+ bool read( CvFileStorage* fs, const char* mapname, const char* matname );
+ bool read( CvFileStorage* fs, const char* seqname, int idx );
+ void save( const char* filename, const char* matname, const int* params=0 );
+ void write( CvFileStorage* fs, const char* matname );
+
+ void show( const char* window_name );
+
+ bool is_valid() { return matrix != 0; }
+
+ int rows() const { return matrix ? matrix->rows : 0; }
+ int cols() const { return matrix ? matrix->cols : 0; }
+
+ CvSize size() const
+ {
+ return !matrix ? cvSize(0,0) : cvSize(matrix->rows,matrix->cols);
+ }
+
+ int type() const { return matrix ? CV_MAT_TYPE(matrix->type) : 0; }
+ int depth() const { return matrix ? CV_MAT_DEPTH(matrix->type) : 0; }
+ int channels() const { return matrix ? CV_MAT_CN(matrix->type) : 0; }
+ int pix_size() const { return matrix ? CV_ELEM_SIZE(matrix->type) : 0; }
+
+ uchar* data() { return matrix ? matrix->data.ptr : 0; }
+ const uchar* data() const { return matrix ? matrix->data.ptr : 0; }
+ int step() const { return matrix ? matrix->step : 0; }
+
+ void set_data( void* _data, int _step=CV_AUTOSTEP )
+ { cvSetData( matrix, _data, _step ); }
+
+ uchar* row(int i) { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; }
+ const uchar* row(int i) const
+ { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; }
+
+ operator const CvMat* () const { return matrix; }
+ operator CvMat* () { return matrix; }
+
+ CvMatrix& operator = (const CvMatrix& _m)
+ {
+ _m.addref();
+ release();
+ matrix = _m.matrix;
+ return *this;
+ }
+
+protected:
+ CvMat* matrix;
+};
+
+/****************************************************************************************\
+ * CamShiftTracker *
+ \****************************************************************************************/
+
+// CvCamShiftTracker: color-histogram based object tracker (CamShift).
+// Workflow visible from this interface: set_window() with the initial
+// rectangle, update_histogram() to build the model, then track_object()
+// once per frame.
+class CV_EXPORTS CvCamShiftTracker
+{
+public:
+
+    CvCamShiftTracker();
+    virtual ~CvCamShiftTracker();
+
+    /**** Characteristics of the object that are calculated by track_object method *****/
+    float get_orientation() const // orientation of the object in degrees
+    { return m_box.angle; }
+    float get_length() const // the larger linear size of the object
+    { return m_box.size.height; }
+    float get_width() const // the smaller linear size of the object
+    { return m_box.size.width; }
+    CvPoint2D32f get_center() const // center of the object
+    { return m_box.center; }
+    CvRect get_window() const // bounding rectangle for the object
+    { return m_comp.rect; }
+
+    /*********************** Tracking parameters ************************/
+    int get_threshold() const // thresholding value that applied to back project
+    { return m_threshold; }
+
+    int get_hist_dims( int* dims = 0 ) const // returns number of histogram dimensions and sets
+    { return m_hist ? cvGetDims( m_hist->bins, dims ) : 0; } // 0 when no histogram exists yet
+
+    // NOTE(review): channel index is not range-checked here or in the
+    // setters below; callers must pass 0 <= channel < CV_MAX_DIM.
+    int get_min_ch_val( int channel ) const // get the minimum allowed value of the specified channel
+    { return m_min_ch_val[channel]; }
+
+    int get_max_ch_val( int channel ) const // get the maximum allowed value of the specified channel
+    { return m_max_ch_val[channel]; }
+
+    // set initial object rectangle (must be called before initial calculation of the histogram)
+    bool set_window( CvRect window)
+    { m_comp.rect = window; return true; }
+
+    bool set_threshold( int threshold ) // threshold applied to the histogram bins
+    { m_threshold = threshold; return true; }
+
+    bool set_hist_bin_range( int dim, int min_val, int max_val );
+
+    bool set_hist_dims( int c_dims, int* dims );// set the histogram parameters
+
+    bool set_min_ch_val( int channel, int val ) // set the minimum allowed value of the specified channel
+    { m_min_ch_val[channel] = val; return true; }
+    bool set_max_ch_val( int channel, int val ) // set the maximum allowed value of the specified channel
+    { m_max_ch_val[channel] = val; return true; }
+
+    /************************ The processing methods *********************************/
+    // update object position
+    virtual bool track_object( const IplImage* cur_frame );
+
+    // update object histogram
+    virtual bool update_histogram( const IplImage* cur_frame );
+
+    // reset histogram
+    virtual void reset_histogram();
+
+    /************************ Retrieving internal data *******************************/
+    // get back project image
+    virtual IplImage* get_back_project()
+    { return m_back_project; }
+
+    // query a single histogram bin (multi-dimensional index); 0.f if no histogram
+    float query( int* bin ) const
+    { return m_hist ? (float)cvGetRealND(m_hist->bins, bin) : 0.f; }
+
+protected:
+
+    // internal method for color conversion: fills m_color_planes group
+    virtual void color_transform( const IplImage* img );
+
+    CvHistogram* m_hist;      // color histogram of the tracked object
+
+    CvBox2D m_box;            // current pose: center, size, angle
+    CvConnectedComp m_comp;   // current search window / connected component
+
+    float m_hist_ranges_data[CV_MAX_DIM][2];
+    float* m_hist_ranges[CV_MAX_DIM]; // points into m_hist_ranges_data
+
+    int m_min_ch_val[CV_MAX_DIM]; // per-channel lower bounds
+    int m_max_ch_val[CV_MAX_DIM]; // per-channel upper bounds
+    int m_threshold;              // back-projection threshold
+
+    IplImage* m_color_planes[CV_MAX_DIM]; // filled by color_transform()
+    IplImage* m_back_project;
+    IplImage* m_temp;
+    IplImage* m_mask;
+};
+
+/****************************************************************************************\
+* Expectation - Maximization *
+\****************************************************************************************/
+// CvEMParams: parameters of the EM (Expectation-Maximization) training.
+// The optional matrices supply initial estimates of the mixture.
+struct CV_EXPORTS_W_MAP CvEMParams
+{
+    CvEMParams();
+    CvEMParams( int nclusters, int cov_mat_type=cv::EM::COV_MAT_DIAGONAL,
+                int start_step=cv::EM::START_AUTO_STEP,
+                CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON),
+                const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 );
+
+    CV_PROP_RW int nclusters;    // number of mixture components
+    CV_PROP_RW int cov_mat_type; // one of CvEM::COV_MAT_*
+    CV_PROP_RW int start_step;   // one of CvEM::START_*
+    // NOTE(review): the matrices below look like borrowed pointers
+    // (caller retains ownership) -- confirm against the CvEM sources.
+    const CvMat* probs;
+    const CvMat* weights;
+    const CvMat* means;
+    const CvMat** covs;
+    CV_PROP_RW CvTermCriteria term_crit; // iteration stop criteria
+};
+
+
+// CvEM: legacy C-style facade over the C++ cv::EM Gaussian mixture model
+// (the actual estimation is delegated to the emObj member below).
+class CV_EXPORTS_W CvEM : public CvStatModel
+{
+public:
+    // Type of covariation matrices
+    enum { COV_MAT_SPHERICAL=cv::EM::COV_MAT_SPHERICAL,
+           COV_MAT_DIAGONAL =cv::EM::COV_MAT_DIAGONAL,
+           COV_MAT_GENERIC  =cv::EM::COV_MAT_GENERIC };
+
+    // The initial step
+    enum { START_E_STEP=cv::EM::START_E_STEP,
+           START_M_STEP=cv::EM::START_M_STEP,
+           START_AUTO_STEP=cv::EM::START_AUTO_STEP };
+
+    CV_WRAP CvEM();
+    CvEM( const CvMat* samples, const CvMat* sampleIdx=0,
+          CvEMParams params=CvEMParams(), CvMat* labels=0 );
+
+    virtual ~CvEM();
+
+    virtual bool train( const CvMat* samples, const CvMat* sampleIdx=0,
+                        CvEMParams params=CvEMParams(), CvMat* labels=0 );
+
+    virtual float predict( const CvMat* sample, CV_OUT CvMat* probs ) const;
+
+    // cv::Mat overloads of the CvMat* API above
+    CV_WRAP CvEM( const cv::Mat& samples, const cv::Mat& sampleIdx=cv::Mat(),
+                  CvEMParams params=CvEMParams() );
+
+    CV_WRAP virtual bool train( const cv::Mat& samples,
+                                const cv::Mat& sampleIdx=cv::Mat(),
+                                CvEMParams params=CvEMParams(),
+                                CV_OUT cv::Mat* labels=0 );
+
+    CV_WRAP virtual float predict( const cv::Mat& sample, CV_OUT cv::Mat* probs=0 ) const;
+    CV_WRAP virtual double calcLikelihood( const cv::Mat &sample ) const;
+
+    CV_WRAP int getNClusters() const;
+    CV_WRAP cv::Mat getMeans() const;
+    CV_WRAP void getCovs(CV_OUT std::vector<cv::Mat>& covs) const;
+    CV_WRAP cv::Mat getWeights() const;
+    CV_WRAP cv::Mat getProbs() const;
+
+    // returns DBL_MAX while the underlying model is untrained
+    CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? logLikelihood : DBL_MAX; }
+
+    CV_WRAP virtual void clear();
+
+    // legacy accessors kept for source compatibility with the old C API
+    int get_nclusters() const;
+    const CvMat* get_means() const;
+    const CvMat** get_covs() const;
+    const CvMat* get_weights() const;
+    const CvMat* get_probs() const;
+
+    inline double get_log_likelihood() const { return getLikelihood(); }
+
+    virtual void read( CvFileStorage* fs, CvFileNode* node );
+    virtual void write( CvFileStorage* fs, const char* name ) const;
+
+protected:
+    // NOTE(review): presumably re-points the *Hdr members at emObj's
+    // current matrices -- confirm against the implementation.
+    void set_mat_hdrs();
+
+    cv::EM emObj;        // underlying C++ EM model
+    cv::Mat probs;
+    double logLikelihood;
+
+    // CvMat header wrappers used by the legacy get_* accessors
+    CvMat meansHdr;
+    std::vector<CvMat> covsHdrs;
+    std::vector<CvMat*> covsPtrs;
+    CvMat weightsHdr;
+    CvMat probsHdr;
+};
+
+namespace cv
+{
+
+// Backward-compatibility aliases exposing the legacy EM types in cv::.
+typedef CvEMParams EMParams;
+typedef CvEM ExpectationMaximization;
+
+/*!
+ The Patch Generator class.
+
+ Produces randomly transformed (affine warp + optional blur + noise)
+ versions of an image patch, used to synthesize training views for the
+ keypoint classifiers below.
+ */
+class CV_EXPORTS PatchGenerator
+{
+public:
+    PatchGenerator();
+    // ranges: background intensity, noise amplitude, anisotropic scaling
+    // (lambda), and the two rotation angles theta/phi in radians
+    PatchGenerator(double _backgroundMin, double _backgroundMax,
+                   double _noiseRange, bool _randomBlur=true,
+                   double _lambdaMin=0.6, double _lambdaMax=1.5,
+                   double _thetaMin=-CV_PI, double _thetaMax=CV_PI,
+                   double _phiMin=-CV_PI, double _phiMax=CV_PI );
+    // generate a random patch around pt using rng
+    void operator()(const Mat& image, Point2f pt, Mat& patch, Size patchSize, RNG& rng) const;
+    // generate a patch using a caller-supplied transform
+    void operator()(const Mat& image, const Mat& transform, Mat& patch,
+                    Size patchSize, RNG& rng) const;
+    void warpWholeImage(const Mat& image, Mat& matT, Mat& buf,
+                        CV_OUT Mat& warped, int border, RNG& rng) const;
+    void generateRandomTransform(Point2f srcCenter, Point2f dstCenter,
+                                 CV_OUT Mat& transform, RNG& rng,
+                                 bool inverse=false) const;
+    void setAffineParam(double lambda, double theta, double phi);
+
+    double backgroundMin, backgroundMax;
+    double noiseRange;
+    bool randomBlur;
+    double lambdaMin, lambdaMax;
+    double thetaMin, thetaMax;
+    double phiMin, phiMax;
+};
+
+
+// LDetector: multi-scale keypoint detector; operator() variants accept
+// either the image itself or a precomputed pyramid.
+class CV_EXPORTS LDetector
+{
+public:
+    LDetector();
+    LDetector(int _radius, int _threshold, int _nOctaves,
+              int _nViews, double _baseFeatureSize, double _clusteringDistance);
+    // detect up to maxCount keypoints (0 = no limit)
+    void operator()(const Mat& image,
+                    CV_OUT vector<KeyPoint>& keypoints,
+                    int maxCount=0, bool scaleCoords=true) const;
+    // same, but on a caller-built image pyramid
+    void operator()(const vector<Mat>& pyr,
+                    CV_OUT vector<KeyPoint>& keypoints,
+                    int maxCount=0, bool scaleCoords=true) const;
+    // keep only keypoints stable under random warps from patchGenerator
+    void getMostStable2D(const Mat& image, CV_OUT vector<KeyPoint>& keypoints,
+                         int maxCount, const PatchGenerator& patchGenerator) const;
+    void setVerbose(bool verbose);
+
+    void read(const FileNode& node);
+    void write(FileStorage& fs, const String& name=String()) const;
+
+    int radius;
+    int threshold;
+    int nOctaves;
+    int nViews;
+    bool verbose;
+
+    double baseFeatureSize;
+    double clusteringDistance;
+};
+
+// historical alias for this detector
+typedef LDetector YAPE;
+
+// FernClassifier: keypoint classifier built from "ferns" (small groups of
+// binary pixel comparisons, see struct Feature).  Each fern of structSize
+// tests maps a patch to one of 2^structSize leaves; per-leaf posteriors
+// are accumulated into a class signature.
+class CV_EXPORTS FernClassifier
+{
+public:
+    FernClassifier();
+    FernClassifier(const FileNode& node); // construct by deserializing
+    // construct and train in one step; defaults come from the enum below
+    FernClassifier(const vector<vector<Point2f> >& points,
+                   const vector<Mat>& refimgs,
+                   const vector<vector<int> >& labels=vector<vector<int> >(),
+                   int _nclasses=0, int _patchSize=PATCH_SIZE,
+                   int _signatureSize=DEFAULT_SIGNATURE_SIZE,
+                   int _nstructs=DEFAULT_STRUCTS,
+                   int _structSize=DEFAULT_STRUCT_SIZE,
+                   int _nviews=DEFAULT_VIEWS,
+                   int _compressionMethod=COMPRESSION_NONE,
+                   const PatchGenerator& patchGenerator=PatchGenerator());
+    virtual ~FernClassifier();
+    virtual void read(const FileNode& n);
+    virtual void write(FileStorage& fs, const String& name=String()) const;
+    // train from one image: each keypoint becomes a class, with views
+    // synthesized by patchGenerator
+    virtual void trainFromSingleView(const Mat& image,
+                                     const vector<KeyPoint>& keypoints,
+                                     int _patchSize=PATCH_SIZE,
+                                     int _signatureSize=DEFAULT_SIGNATURE_SIZE,
+                                     int _nstructs=DEFAULT_STRUCTS,
+                                     int _structSize=DEFAULT_STRUCT_SIZE,
+                                     int _nviews=DEFAULT_VIEWS,
+                                     int _compressionMethod=COMPRESSION_NONE,
+                                     const PatchGenerator& patchGenerator=PatchGenerator());
+    virtual void train(const vector<vector<Point2f> >& points,
+                       const vector<Mat>& refimgs,
+                       const vector<vector<int> >& labels=vector<vector<int> >(),
+                       int _nclasses=0, int _patchSize=PATCH_SIZE,
+                       int _signatureSize=DEFAULT_SIGNATURE_SIZE,
+                       int _nstructs=DEFAULT_STRUCTS,
+                       int _structSize=DEFAULT_STRUCT_SIZE,
+                       int _nviews=DEFAULT_VIEWS,
+                       int _compressionMethod=COMPRESSION_NONE,
+                       const PatchGenerator& patchGenerator=PatchGenerator());
+    // classify the patch around kpt (or the whole patch image); fills
+    // signature and returns the best class index
+    virtual int operator()(const Mat& img, Point2f kpt, vector<float>& signature) const;
+    virtual int operator()(const Mat& patch, vector<float>& signature) const;
+    virtual void clear();
+    virtual bool empty() const;
+    void setVerbose(bool verbose);
+
+    int getClassCount() const;
+    int getStructCount() const;
+    int getStructSize() const;
+    int getSignatureSize() const;
+    int getCompressionMethod() const;
+    Size getPatchSize() const;
+
+    // Feature: one binary test -- compares two pixels inside the patch
+    struct Feature
+    {
+        uchar x1, y1, x2, y2;
+        Feature() : x1(0), y1(0), x2(0), y2(0) {}
+        Feature(int _x1, int _y1, int _x2, int _y2)
+        : x1((uchar)_x1), y1((uchar)_y1), x2((uchar)_x2), y2((uchar)_y2)
+        {}
+        template<typename _Tp> bool operator ()(const Mat_<_Tp>& patch) const
+        { return patch(y1,x1) > patch(y2, x2); }
+    };
+
+    // default hyper-parameters and posterior-compression modes
+    enum
+    {
+        PATCH_SIZE = 31,
+        DEFAULT_STRUCTS = 50,
+        DEFAULT_STRUCT_SIZE = 9,
+        DEFAULT_VIEWS = 5000,
+        DEFAULT_SIGNATURE_SIZE = 176,
+        COMPRESSION_NONE = 0,
+        COMPRESSION_RANDOM_PROJ = 1,
+        COMPRESSION_PCA = 2,
+        DEFAULT_COMPRESSION_METHOD = COMPRESSION_NONE
+    };
+
+protected:
+    virtual void prepare(int _nclasses, int _patchSize, int _signatureSize,
+                         int _nstructs, int _structSize,
+                         int _nviews, int _compressionMethod);
+    virtual void finalize(RNG& rng);
+    virtual int getLeaf(int fidx, const Mat& patch) const;
+
+    bool verbose;
+    int nstructs;
+    int structSize;
+    int nclasses;
+    int signatureSize;
+    int compressionMethod;
+    int leavesPerStruct;
+    Size patchSize;
+    vector<Feature> features;
+    vector<int> classCounters;
+    vector<float> posteriors;
+};
+
+
+/****************************************************************************************\
+ * Calonder Classifier *
+ \****************************************************************************************/
+
+struct RTreeNode; // defined below, after RandomizedTree
+
+// BaseKeypoint: a keypoint位置 plus the image it was detected in; the
+// image pointer is borrowed (not owned by this struct).
+struct CV_EXPORTS BaseKeypoint
+{
+    int x;
+    int y;
+    IplImage* image;
+
+    BaseKeypoint()
+    : x(0), y(0), image(NULL)
+    {}
+
+    BaseKeypoint(int _x, int _y, IplImage* _image)
+    : x(_x), y(_y), image(_image)
+    {}
+};
+
+// RandomizedTree: one tree of the Calonder randomized-trees classifier.
+// Internal nodes hold binary pixel tests (RTreeNode); leaves hold class
+// posteriors, stored either as floats (posteriors_) or as quantized
+// bytes (posteriors2_).
+class CV_EXPORTS RandomizedTree
+{
+public:
+    friend class RTreeClassifier;
+
+    static const uchar PATCH_SIZE = 32;
+    static const int DEFAULT_DEPTH = 9;
+    static const int DEFAULT_VIEWS = 5000;
+    static const size_t DEFAULT_REDUCED_NUM_DIM = 176;
+    // quantization percentiles used when converting posteriors to bytes
+    static float GET_LOWER_QUANT_PERC() { return .03f; }
+    static float GET_UPPER_QUANT_PERC() { return .92f; }
+
+    RandomizedTree();
+    ~RandomizedTree();
+
+    void train(vector<BaseKeypoint> const& base_set, RNG &rng,
+               int depth, int views, size_t reduced_num_dim, int num_quant_bits);
+    // variant with an explicit patch generator for synthesizing views
+    void train(vector<BaseKeypoint> const& base_set, RNG &rng,
+               PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim,
+               int num_quant_bits);
+
+    // following two funcs are EXPERIMENTAL (do not use unless you know exactly what you do)
+    static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0);
+    static void quantizeVector(float *src, int dim, int N, float bnds[2], uchar *dst);
+
+    // patch_data must be a 32x32 array (no row padding)
+    float* getPosterior(uchar* patch_data);
+    const float* getPosterior(uchar* patch_data) const;
+    uchar* getPosterior2(uchar* patch_data);  // quantized variant
+    const uchar* getPosterior2(uchar* patch_data) const;
+
+    void read(const char* file_name, int num_quant_bits);
+    void read(std::istream &is, int num_quant_bits);
+    void write(const char* file_name) const;
+    void write(std::ostream &os) const;
+
+    int classes() { return classes_; }
+    int depth() { return depth_; }
+
+    //void setKeepFloatPosteriors(bool b) { keep_float_posteriors_ = b; }
+    void discardFloatPosteriors() { freePosteriors(1); }
+
+    inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); }
+
+    // debug
+    void savePosteriors(std::string url, bool append=false);
+    void savePosteriors2(std::string url, bool append=false);
+
+private:
+    int classes_;
+    int depth_;
+    int num_leaves_;
+    vector<RTreeNode> nodes_;
+    float **posteriors_;  // 16-bytes aligned posteriors
+    uchar **posteriors2_; // 16-bytes aligned posteriors
+    vector<int> leaf_counts_;
+
+    void createNodes(int num_nodes, RNG &rng);
+    void allocPosteriorsAligned(int num_leaves, int num_classes);
+    void freePosteriors(int which); // which: 1=posteriors_, 2=posteriors2_, 3=both
+    void init(int classes, int depth, RNG &rng);
+    void addExample(int class_id, uchar* patch_data);
+    void finalize(size_t reduced_num_dim, int num_quant_bits);
+    int getIndex(uchar* patch_data) const; // leaf index reached by the patch
+    inline float* getPosteriorByIndex(int index);
+    inline const float* getPosteriorByIndex(int index) const;
+    inline uchar* getPosteriorByIndex2(int index);
+    inline const uchar* getPosteriorByIndex2(int index) const;
+    //void makeRandomMeasMatrix(float *cs_phi, PHI_DISTR_TYPE dt, size_t reduced_num_dim);
+    void convertPosteriorsToChar();
+    void makePosteriors2(int num_quant_bits);
+    void compressLeaves(size_t reduced_num_dim);
+    void estimateQuantPercForPosteriors(float perc[2]);
+};
+
+
+// Raw pixel-buffer accessor for an IplImage.
+// NOTE(review): no NULL check -- callers must pass a valid image.
+inline uchar* getData(IplImage* image)
+{
+    return reinterpret_cast<uchar*>(image->imageData);
+}
+
+// Non-const overload: forwards to the const version and strips constness.
+inline float* RandomizedTree::getPosteriorByIndex(int index)
+{
+    return const_cast<float*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex(index));
+}
+
+// Float posterior vector of the given leaf; index is not range-checked.
+inline const float* RandomizedTree::getPosteriorByIndex(int index) const
+{
+    return posteriors_[index];
+}
+
+// Non-const overload of the quantized accessor below.
+inline uchar* RandomizedTree::getPosteriorByIndex2(int index)
+{
+    return const_cast<uchar*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex2(index));
+}
+
+// Quantized (byte) posterior vector of the given leaf; no range check.
+inline const uchar* RandomizedTree::getPosteriorByIndex2(int index) const
+{
+    return posteriors2_[index];
+}
+
+// RTreeNode: one internal node of a RandomizedTree -- a binary test that
+// compares two pixels of a PATCH_SIZE x PATCH_SIZE patch, addressed by
+// precomputed linear offsets into the row-major patch buffer.
+struct CV_EXPORTS RTreeNode
+{
+    short offset1, offset2;
+
+    RTreeNode() {}
+    RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
+    : offset1(y1*RandomizedTree::PATCH_SIZE + x1),
+      offset2(y2*RandomizedTree::PATCH_SIZE + x2)
+    {}
+
+    //! Left child on 0, right child on 1
+    inline bool operator() (uchar* patch_data) const
+    {
+        return patch_data[offset1] > patch_data[offset2];
+    }
+};
+
+// RTreeClassifier: ensemble of RandomizedTree objects (Calonder-style
+// keypoint classifier); signatures are averaged tree posteriors.
+class CV_EXPORTS RTreeClassifier
+{
+public:
+    static const int DEFAULT_TREES = 48;
+    static const size_t DEFAULT_NUM_QUANT_BITS = 4;
+
+    RTreeClassifier();
+    void train(vector<BaseKeypoint> const& base_set,
+               RNG &rng,
+               int num_trees = RTreeClassifier::DEFAULT_TREES,
+               int depth = RandomizedTree::DEFAULT_DEPTH,
+               int views = RandomizedTree::DEFAULT_VIEWS,
+               size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
+               int num_quant_bits = DEFAULT_NUM_QUANT_BITS);
+    // variant with an explicit patch generator for synthesizing views
+    void train(vector<BaseKeypoint> const& base_set,
+               RNG &rng,
+               PatchGenerator &make_patch,
+               int num_trees = RTreeClassifier::DEFAULT_TREES,
+               int depth = RandomizedTree::DEFAULT_DEPTH,
+               int views = RandomizedTree::DEFAULT_VIEWS,
+               size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
+               int num_quant_bits = DEFAULT_NUM_QUANT_BITS);
+
+    // sig must point to a memory block of at least classes()*sizeof(float|uchar) bytes
+    void getSignature(IplImage *patch, uchar *sig) const;
+    void getSignature(IplImage *patch, float *sig) const;
+    void getSparseSignature(IplImage *patch, float *sig, float thresh) const;
+    // TODO: deprecated in favor of getSignature overload, remove
+    void getFloatSignature(IplImage *patch, float *sig) const { getSignature(patch, sig); }
+
+    static int countNonZeroElements(float *vec, int n, double tol=1e-10);
+    static inline void safeSignatureAlloc(uchar **sig, int num_sig=1, int sig_len=176);
+    static inline uchar* safeSignatureAlloc(int num_sig=1, int sig_len=176);
+
+    inline int classes() const { return classes_; }
+    inline int original_num_classes() const { return original_num_classes_; }
+
+    void setQuantization(int num_quant_bits);
+    void discardFloatPosteriors();
+
+    void read(const char* file_name);
+    void read(std::istream &is);
+    void write(const char* file_name) const;
+    void write(std::ostream &os) const;
+
+    // experimental and debug
+    void saveAllFloatPosteriors(std::string file_url);
+    void saveAllBytePosteriors(std::string file_url);
+    void setFloatPosteriorsFromTextfile_176(std::string url);
+    float countZeroElements();
+
+    vector<RandomizedTree> trees_;
+
+private:
+    int classes_;
+    int num_quant_bits_;
+    mutable uchar **posteriors_;       // scratch buffers, hence mutable
+    mutable unsigned short *ptemp_;
+    int original_num_classes_;
+    bool keep_floats_;
+};
+
+/****************************************************************************************\
+* One-Way Descriptor *
+\****************************************************************************************/
+
+// CvAffinePose: defines a parameterized affine transformation of an image patch.
+// An image patch is rotated on angle phi (in degrees), then scaled lambda1 times
+// along horizontal and lambda2 times along vertical direction, and then rotated again
+// on angle (theta - phi).
+class CV_EXPORTS CvAffinePose
+{
+public:
+    float phi;     // first rotation angle, degrees
+    float theta;   // total rotation; second rotation is (theta - phi)
+    float lambda1; // horizontal scale
+    float lambda2; // vertical scale
+};
+
+// OneWayDescriptor: descriptor of a single feature, stored as the patch
+// warped into m_pose_count affine poses (optionally projected into PCA
+// space) so that matching reduces to a nearest-pose search.
+class CV_EXPORTS OneWayDescriptor
+{
+public:
+    OneWayDescriptor();
+    ~OneWayDescriptor();
+
+    // allocates memory for given descriptor parameters
+    void Allocate(int pose_count, CvSize size, int nChannels);
+
+    // GenerateSamples: generates affine transformed patches with averaging them over small transformation variations.
+    // If external poses and transforms were specified, uses them instead of generating random ones
+    // - pose_count: the number of poses to be generated
+    // - frontal: the input patch (can be a roi in a larger image)
+    // - norm: if nonzero, normalizes the output patch so that the sum of pixel intensities is 1
+    void GenerateSamples(int pose_count, IplImage* frontal, int norm = 0);
+
+    // GenerateSamplesFast: generates affine transformed patches with averaging them over small transformation variations.
+    // Uses precalculated transformed pca components.
+    // - frontal: the input patch (can be a roi in a larger image)
+    // - pca_hr_avg: pca average vector
+    // - pca_hr_eigenvectors: pca eigenvectors
+    // - pca_descriptors: an array of precomputed descriptors of pca components containing their affine transformations
+    //   pca_descriptors[0] corresponds to the average, pca_descriptors[1]-pca_descriptors[pca_dim] correspond to eigenvectors
+    void GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg,
+                             CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors);
+
+    // sets the poses and corresponding transforms
+    void SetTransforms(CvAffinePose* poses, CvMat** transforms);
+
+    // Initialize: builds a descriptor.
+    // - pose_count: the number of poses to build. If poses were set externally, uses them rather than generating random ones
+    // - frontal: input patch. Can be a roi in a larger image
+    // - feature_name: the feature name to be associated with the descriptor
+    // - norm: if 1, the affine transformed patches are normalized so that their sum is 1
+    void Initialize(int pose_count, IplImage* frontal, const char* feature_name = 0, int norm = 0);
+
+    // InitializeFast: builds a descriptor using precomputed descriptors of pca components
+    // - pose_count: the number of poses to build
+    // - frontal: input patch. Can be a roi in a larger image
+    // - feature_name: the feature name to be associated with the descriptor
+    // - pca_hr_avg: average vector for PCA
+    // - pca_hr_eigenvectors: PCA eigenvectors (one vector per row)
+    // - pca_descriptors: precomputed descriptors of PCA components, the first descriptor for the average vector
+    //   followed by the descriptors for eigenvectors
+    void InitializeFast(int pose_count, IplImage* frontal, const char* feature_name,
+                        CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors);
+
+    // ProjectPCASample: unwarps an image patch into a vector and projects it into PCA space
+    // - patch: input image patch
+    // - avg: PCA average vector
+    // - eigenvectors: PCA eigenvectors, one per row
+    // - pca_coeffs: output PCA coefficients
+    void ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const;
+
+    // InitializePCACoeffs: projects all warped patches into PCA space
+    // - avg: PCA average vector
+    // - eigenvectors: PCA eigenvectors, one per row
+    void InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors);
+
+    // EstimatePose: finds the closest match between an input patch and a set of patches with different poses
+    // - patch: input image patch
+    // - pose_idx: the output index of the closest pose
+    // - distance: the distance to the closest pose (L2 distance)
+    void EstimatePose(IplImage* patch, int& pose_idx, float& distance) const;
+
+    // EstimatePosePCA: finds the closest match between an input patch and a set of patches with different poses.
+    // The distance between patches is computed in PCA space
+    // - patch: input image patch
+    // - pose_idx: the output index of the closest pose
+    // - distance: distance to the closest pose (L2 distance in PCA space)
+    // - avg: PCA average vector. If 0, matching without PCA is used
+    // - eigenvectors: PCA eigenvectors, one per row
+    void EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvalues) const;
+
+    // GetPatchSize: returns the size of each image patch after warping (2 times smaller than the input patch)
+    CvSize GetPatchSize() const
+    {
+        return m_patch_size;
+    }
+
+    // GetInputPatchSize: returns the required size of the patch that the descriptor is built from
+    // (2 time larger than the patch after warping)
+    CvSize GetInputPatchSize() const
+    {
+        return cvSize(m_patch_size.width*2, m_patch_size.height*2);
+    }
+
+    // GetPatch: returns a patch corresponding to specified pose index
+    // - index: pose index
+    // - return value: the patch corresponding to specified pose index
+    IplImage* GetPatch(int index);
+
+    // GetPose: returns a pose corresponding to specified pose index
+    // - index: pose index
+    // - return value: the pose corresponding to specified pose index
+    CvAffinePose GetPose(int index) const;
+
+    // Save: saves all patches with different poses to a specified path
+    void Save(const char* path);
+
+    // ReadByName: reads a descriptor from a file storage
+    // - fs: file storage
+    // - parent: parent node
+    // - name: node name
+    // - return value: 1 if succeeded, 0 otherwise
+    int ReadByName(CvFileStorage* fs, CvFileNode* parent, const char* name);
+
+    // ReadByName: reads a descriptor from a file node
+    // - parent: parent node
+    // - name: node name
+    // - return value: 1 if succeeded, 0 otherwise
+    int ReadByName(const FileNode &parent, const char* name);
+
+    // Write: writes a descriptor into a file storage
+    // - fs: file storage
+    // - name: node name
+    void Write(CvFileStorage* fs, const char* name);
+
+    // GetFeatureName: returns a name corresponding to a feature
+    const char* GetFeatureName() const;
+
+    // GetCenter: returns the center of the feature
+    CvPoint GetCenter() const;
+
+    void SetPCADimHigh(int pca_dim_high) {m_pca_dim_high = pca_dim_high;};
+    void SetPCADimLow(int pca_dim_low) {m_pca_dim_low = pca_dim_low;};
+
+    int GetPCADimLow() const;
+    int GetPCADimHigh() const;
+
+    // borrowed pointer to the internal array of PCA coefficient matrices
+    CvMat** GetPCACoeffs() const {return m_pca_coeffs;}
+
+protected:
+    int m_pose_count;             // the number of poses
+    CvSize m_patch_size;          // size of each image
+    IplImage** m_samples;         // an array of length m_pose_count containing the patch in different poses
+    IplImage* m_input_patch;
+    IplImage* m_train_patch;
+    CvMat** m_pca_coeffs;         // an array of length m_pose_count containing pca decomposition of the patch in different poses
+    CvAffinePose* m_affine_poses; // an array of poses
+    CvMat** m_transforms;         // an array of affine transforms corresponding to poses
+
+    string m_feature_name;        // the name of the feature associated with the descriptor
+    CvPoint m_center;             // the coordinates of the feature (the center of the input image ROI)
+
+    int m_pca_dim_high;           // the number of descriptor pca components to use for generating affine poses
+    int m_pca_dim_low;            // the number of pca components to use for comparison
+};
+
+
+// OneWayDescriptorBase: encapsulates functionality for training/loading a set of one way descriptors
+// and finding the nearest closest descriptor to an input feature.
+// Owns the descriptor array, the PCA matrices and the pose/transform tables.
+class CV_EXPORTS OneWayDescriptorBase
+{
+public:
+
+    // creates an instance of OneWayDescriptor from a set of training files
+    // - patch_size: size of the input (large) patch
+    // - pose_count: the number of poses to generate for each descriptor
+    // - train_path: path to training files
+    // - pca_config: the name of the file that contains PCA for small patches (2 times smaller
+    //   than patch_size each dimension
+    // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size)
+    // - pca_desc_config: the name of the file that contains descriptors of PCA components
+    OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path = 0, const char* pca_config = 0,
+                         const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1,
+                         int pca_dim_high = 100, int pca_dim_low = 100);
+
+    // alternative constructor: loads/creates PCA data from a single pca_filename
+    OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename, const string &train_path = string(), const string &images_list = string(),
+                         float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1,
+                         int pca_dim_high = 100, int pca_dim_low = 100);
+
+
+    virtual ~OneWayDescriptorBase();
+    void clear ();
+
+
+    // Allocate: allocates memory for a given number of descriptors
+    void Allocate(int train_feature_count);
+
+    // AllocatePCADescriptors: allocates memory for pca descriptors
+    void AllocatePCADescriptors();
+
+    // returns patch size
+    CvSize GetPatchSize() const {return m_patch_size;};
+    // returns the number of poses for each descriptor
+    int GetPoseCount() const {return m_pose_count;};
+
+    // returns the number of pyramid levels
+    int GetPyrLevels() const {return m_pyr_levels;};
+
+    // returns the number of descriptors
+    int GetDescriptorCount() const {return m_train_feature_count;};
+
+    // CreateDescriptorsFromImage: creates descriptors for each of the input features
+    // - src: input image
+    // - features: input features
+    void CreateDescriptorsFromImage(IplImage* src, const vector<KeyPoint>& features);
+
+    // CreatePCADescriptors: generates descriptors for PCA components, needed for fast generation of feature descriptors
+    void CreatePCADescriptors();
+
+    // returns a feature descriptor by feature index (index is not range-checked)
+    const OneWayDescriptor* GetDescriptor(int desc_idx) const {return &m_descriptors[desc_idx];};
+
+    // FindDescriptor: finds the closest descriptor
+    // - patch: input image patch
+    // - desc_idx: output index of the closest descriptor to the input patch
+    // - pose_idx: output index of the closest pose of the closest descriptor to the input patch
+    // - distance: distance from the input patch to the closest feature pose
+    // - _scale: scale of the input patch for each descriptor
+    // - scale_ranges: input scales variation (float[2])
+    void FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale = 0, float* scale_ranges = 0) const;
+
+    // n-best variant of FindDescriptor:
+    // - patch: input image patch
+    // - n: number of the closest indexes
+    // - desc_idxs: output indexes of the closest descriptor to the input patch (n)
+    // - pose_idxs: output indexes of the closest pose of the closest descriptor to the input patch (n)
+    // - distances: distance from the input patch to the closest feature pose (n)
+    // - _scales: scales of the input patch
+    // - scale_ranges: input scales variation (float[2])
+    void FindDescriptor(IplImage* patch, int n, vector<int>& desc_idxs, vector<int>& pose_idxs,
+                        vector<float>& distances, vector<float>& _scales, float* scale_ranges = 0) const;
+
+    // FindDescriptor: finds the closest descriptor
+    // - src: input image
+    // - pt: center of the feature
+    // - desc_idx: output index of the closest descriptor to the input patch
+    // - pose_idx: output index of the closest pose of the closest descriptor to the input patch
+    // - distance: distance from the input patch to the closest feature pose
+    void FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const;
+
+    // InitializePoses: generates random poses
+    void InitializePoses();
+
+    // InitializeTransformsFromPoses: generates 2x3 affine matrices from poses (initializes m_transforms)
+    void InitializeTransformsFromPoses();
+
+    // InitializePoseTransforms: subsequently calls InitializePoses and InitializeTransformsFromPoses
+    void InitializePoseTransforms();
+
+    // InitializeDescriptor: initializes a descriptor
+    // - desc_idx: descriptor index
+    // - train_image: image patch (ROI is supported)
+    // - feature_label: feature textual label
+    void InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label);
+
+    // overload taking the source keypoint of the patch
+    void InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label);
+
+    // InitializeDescriptors: load features from an image and create descriptors for each of them
+    void InitializeDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
+                               const char* feature_label = "", int desc_start_idx = 0);
+
+    // Write: writes this object to a file storage
+    // - fs: output filestorage
+    void Write (FileStorage &fs) const;
+
+    // Read: reads OneWayDescriptorBase object from a file node
+    // - fn: input file node
+    void Read (const FileNode &fn);
+
+    // LoadPCADescriptors: loads PCA descriptors from a file
+    // - filename: input filename
+    int LoadPCADescriptors(const char* filename);
+
+    // LoadPCADescriptors: loads PCA descriptors from a file node
+    // - fn: input file node
+    int LoadPCADescriptors(const FileNode &fn);
+
+    // SavePCADescriptors: saves PCA descriptors to a file
+    // - filename: output filename
+    void SavePCADescriptors(const char* filename);
+
+    // SavePCADescriptors: saves PCA descriptors to a file storage
+    // - fs: output file storage
+    void SavePCADescriptors(CvFileStorage* fs) const;
+
+    // GeneratePCA: calculate and save PCA components and descriptors
+    // - img_path: path to training PCA images directory
+    // - images_list: filename with filenames of training PCA images
+    void GeneratePCA(const char* img_path, const char* images_list, int pose_count=500);
+
+    // SetPCAHigh: sets the high resolution pca matrices (copied to internal structures)
+    void SetPCAHigh(CvMat* avg, CvMat* eigenvectors);
+
+    // SetPCALow: sets the low resolution pca matrices (copied to internal structures)
+    void SetPCALow(CvMat* avg, CvMat* eigenvectors);
+
+    // exposes the internal low-resolution PCA matrices (borrowed pointers);
+    // returns the low PCA dimension
+    int GetLowPCA(CvMat** avg, CvMat** eigenvectors)
+    {
+        *avg = m_pca_avg;
+        *eigenvectors = m_pca_eigenvectors;
+        return m_pca_dim_low;
+    };
+
+    int GetPCADimLow() const {return m_pca_dim_low;};
+    int GetPCADimHigh() const {return m_pca_dim_high;};
+
+    void ConvertDescriptorsArrayToTree(); // Converting pca_descriptors array to KD tree
+
+    // GetPCAFilename: get default PCA filename
+    static string GetPCAFilename () { return "pca.yml"; }
+
+    virtual bool empty() const { return m_train_feature_count <= 0 ? true : false; }
+
+protected:
+    CvSize m_patch_size;         // patch size
+    int m_pose_count;            // the number of poses for each descriptor
+    int m_train_feature_count;   // the number of the training features
+    OneWayDescriptor* m_descriptors;    // array of train feature descriptors
+    CvMat* m_pca_avg;            // PCA average Vector for small patches
+    CvMat* m_pca_eigenvectors;   // PCA eigenvectors for small patches
+    CvMat* m_pca_hr_avg;         // PCA average Vector for large patches
+    CvMat* m_pca_hr_eigenvectors;// PCA eigenvectors for large patches
+    OneWayDescriptor* m_pca_descriptors; // an array of PCA descriptors
+
+    cv::flann::Index* m_pca_descriptors_tree; // KD tree built by ConvertDescriptorsArrayToTree
+    CvMat* m_pca_descriptors_matrix;
+
+    CvAffinePose* m_poses;       // array of poses
+    CvMat** m_transforms;        // array of affine transformations corresponding to poses
+
+    int m_pca_dim_high;
+    int m_pca_dim_low;
+
+    int m_pyr_levels;
+    float scale_min;             // scale search range used by FindDescriptor
+    float scale_max;
+    float scale_step;
+
+    // SavePCAall: saves PCA components and descriptors to a file storage
+    // - fs: output file storage
+    void SavePCAall (FileStorage &fs) const;
+
+    // LoadPCAall: loads PCA components and descriptors from a file node
+    // - fn: input file node
+    void LoadPCAall (const FileNode &fn);
+};
+
+// OneWayDescriptorObject: a OneWayDescriptorBase specialized for a single object —
+// descriptors are partitioned into positive (object) and background (negative) features,
+// and each positive descriptor is tagged with an object-part id.
+class CV_EXPORTS OneWayDescriptorObject : public OneWayDescriptorBase
+{
+public:
+ // creates an instance of OneWayDescriptorObject from a set of training files
+ // - patch_size: size of the input (large) patch
+ // - pose_count: the number of poses to generate for each descriptor
+ // - train_path: path to training files
+ // - pca_config: the name of the file that contains PCA for small patches (2 times smaller
+ // than patch_size each dimension)
+ // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size)
+ // - pca_desc_config: the name of the file that contains descriptors of PCA components
+ OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path, const char* pca_config,
+ const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1);
+
+ // alternative constructor: PCA data comes from pca_filename / train_path / images_list;
+ // _scale_min/_scale_max/_scale_step define the scale range (same defaults as the base class)
+ OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename,
+ const string &train_path = string (), const string &images_list = string (),
+ float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1);
+
+
+ virtual ~OneWayDescriptorObject();
+
+ // Allocate: allocates memory for a given number of features
+ // - train_feature_count: the total number of features
+ // - object_feature_count: the number of features extracted from the object
+ void Allocate(int train_feature_count, int object_feature_count);
+
+
+ // accessors for the labeled train keypoints stored in m_train_features
+ void SetLabeledFeatures(const vector<KeyPoint>& features) {m_train_features = features;};
+ vector<KeyPoint>& GetLabeledFeatures() {return m_train_features;};
+ const vector<KeyPoint>& GetLabeledFeatures() const {return m_train_features;};
+ vector<KeyPoint> _GetLabeledFeatures() const;
+
+ // IsDescriptorObject: returns 1 if descriptor with specified index is positive, otherwise 0
+ int IsDescriptorObject(int desc_idx) const;
+
+ // MatchPointToPart: returns the part number of a feature if it matches one of the object parts, otherwise -1
+ int MatchPointToPart(CvPoint pt) const;
+
+ // GetDescriptorPart: returns the part number of the feature corresponding to a specified descriptor
+ // - desc_idx: descriptor index
+ int GetDescriptorPart(int desc_idx) const;
+
+
+ // builds descriptors from train_image for the given keypoints; feature_label names the part,
+ // desc_start_idx is the first descriptor slot to fill, is_background marks negative features
+ void InitializeObjectDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
+ const char* feature_label, int desc_start_idx = 0, float scale = 1.0f,
+ int is_background = 0);
+
+ // GetObjectFeatureCount: returns the number of object features
+ int GetObjectFeatureCount() const {return m_object_feature_count;};
+
+protected:
+ int* m_part_id; // contains part id for each of object descriptors
+ vector<KeyPoint> m_train_features; // train features
+ int m_object_feature_count; // the number of the positive features
+
+};
+
+
+/*
+ * OneWayDescriptorMatcher
+ */
+class OneWayDescriptorMatcher;
+typedef OneWayDescriptorMatcher OneWayDescriptorMatch;
+
+// GenericDescriptorMatcher implementation backed by OneWayDescriptorBase.
+class CV_EXPORTS OneWayDescriptorMatcher : public GenericDescriptorMatcher
+{
+public:
+ // Tunable parameters; the static members below are the defaults.
+ class CV_EXPORTS Params
+ {
+ public:
+ static const int POSE_COUNT = 500;
+ static const int PATCH_WIDTH = 24;
+ static const int PATCH_HEIGHT = 24;
+ static float GET_MIN_SCALE() { return 0.7f; }
+ static float GET_MAX_SCALE() { return 1.5f; }
+ static float GET_STEP_SCALE() { return 1.2f; }
+
+ Params( int poseCount = POSE_COUNT,
+ Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
+ string pcaFilename = string(),
+ string trainPath = string(), string trainImagesList = string(),
+ float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
+ float stepScale = GET_STEP_SCALE() );
+
+ int poseCount; // number of affine poses generated per descriptor
+ Size patchSize; // size of the patch cropped around each keypoint
+ string pcaFilename; // file with precomputed PCA data (empty = none)
+ string trainPath; // path to training images
+ string trainImagesList; // list file naming the training images
+
+ float minScale, maxScale, stepScale; // scale range iterated during matching
+ };
+
+ OneWayDescriptorMatcher( const Params& params=Params() );
+ virtual ~OneWayDescriptorMatcher();
+
+ // (Re)initializes with new params; an already-built OneWayDescriptorBase may be shared via 'base'
+ void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
+
+ // Clears keypoints storing in collection and OneWayDescriptorBase
+ virtual void clear();
+
+ virtual void train();
+
+ virtual bool isMaskSupported();
+
+ virtual void read( const FileNode &fn );
+ virtual void write( FileStorage& fs ) const;
+
+ virtual bool empty() const;
+
+ virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
+
+protected:
+ // Matches a set of keypoints from a single image of the training set. A rectangle with a center in a keypoint
+ // and size (patch_width/2*scale, patch_height/2*scale) is cropped from the source image for each
+ // keypoint. scale is iterated from DescriptorOneWayParams::min_scale to DescriptorOneWayParams::max_scale.
+ // The minimum distance to each training patch with all its affine poses is found over all scales.
+ // The class ID of a match is returned for each keypoint. The distance is calculated over PCA components
+ // loaded with DescriptorOneWay::Initialize, kd tree is used for finding minimum distances.
+ virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks, bool compactResult );
+ virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks, bool compactResult );
+
+ Ptr<OneWayDescriptorBase> base; // underlying one-way descriptor engine
+ Params params;
+ int prevTrainCount; // NOTE(review): presumably the train count at the last train() call — confirm in the .cpp
+};
+
+/*
+ * FernDescriptorMatcher
+ */
+class FernDescriptorMatcher;
+typedef FernDescriptorMatcher FernDescriptorMatch;
+
+// GenericDescriptorMatcher implementation backed by a FernClassifier.
+class CV_EXPORTS FernDescriptorMatcher : public GenericDescriptorMatcher
+{
+public:
+ // Parameters forwarded to the underlying FernClassifier.
+ class CV_EXPORTS Params
+ {
+ public:
+ Params( int nclasses=0,
+ int patchSize=FernClassifier::PATCH_SIZE,
+ int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
+ int nstructs=FernClassifier::DEFAULT_STRUCTS,
+ int structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
+ int nviews=FernClassifier::DEFAULT_VIEWS,
+ int compressionMethod=FernClassifier::COMPRESSION_NONE,
+ const PatchGenerator& patchGenerator=PatchGenerator() );
+
+ // Alternative: read the parameters (and classifier data) from a file.
+ Params( const string& filename );
+
+ int nclasses;
+ int patchSize;
+ int signatureSize;
+ int nstructs;
+ int structSize;
+ int nviews;
+ int compressionMethod;
+ PatchGenerator patchGenerator; // synthesizes randomly warped training views
+
+ string filename; // non-empty when the classifier is loaded from disk
+ };
+
+ FernDescriptorMatcher( const Params& params=Params() );
+ virtual ~FernDescriptorMatcher();
+
+ virtual void clear();
+
+ virtual void train();
+
+ virtual bool isMaskSupported();
+
+ virtual void read( const FileNode &fn );
+ virtual void write( FileStorage& fs ) const;
+ virtual bool empty() const;
+
+ virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
+
+protected:
+ virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, int k,
+ const vector<Mat>& masks, bool compactResult );
+ virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+ vector<vector<DMatch> >& matches, float maxDistance,
+ const vector<Mat>& masks, bool compactResult );
+
+ // trainFernClassifier: (re)trains 'classifier' from the collected train data
+ void trainFernClassifier();
+ // calcBestProbAndMatchIdx: evaluates the classifier signature at pt and reports
+ // the best class probability and its index
+ void calcBestProbAndMatchIdx( const Mat& image, const Point2f& pt,
+ float& bestProb, int& bestMatchIdx, vector<float>& signature );
+ Ptr<FernClassifier> classifier;
+ Params params;
+ int prevTrainCount; // NOTE(review): presumably the train count at the last train() call — confirm in the .cpp
+};
+
+
+/*
+ * CalonderDescriptorExtractor
+ */
+// DescriptorExtractor built on a Calonder randomized-tree classifier (RTreeClassifier).
+// The descriptor of a keypoint is the classifier signature of the patch around it;
+// T is the signature element type (see descriptorType()).
+template<typename T>
+class CV_EXPORTS CalonderDescriptorExtractor : public DescriptorExtractor
+{
+public:
+ // Loads the classifier state from classifierFile (see the constructor definition below).
+ CalonderDescriptorExtractor( const string& classifierFile );
+
+ virtual void read( const FileNode &fn );
+ virtual void write( FileStorage &fs ) const;
+
+ // Descriptor length equals the number of classifier classes; element type is T.
+ virtual int descriptorSize() const { return classifier_.classes(); }
+ virtual int descriptorType() const { return DataType<T>::type; }
+
+ virtual bool empty() const;
+
+protected:
+ virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+
+ RTreeClassifier classifier_;
+ static const int BORDER_SIZE = 16; // keypoints closer than this to the image border are dropped
+};
+
+// Constructor: loads the randomized-tree classifier state from the given file.
+template<typename T>
+CalonderDescriptorExtractor<T>::CalonderDescriptorExtractor(const std::string& classifier_file)
+{
+ classifier_.read( classifier_file.c_str() );
+}
+
+// Computes one classifier signature per keypoint; keypoints too close to the image
+// border (within BORDER_SIZE) are removed from 'keypoints' before computation.
+template<typename T>
+void CalonderDescriptorExtractor<T>::computeImpl( const Mat& image,
+ vector<KeyPoint>& keypoints,
+ Mat& descriptors) const
+{
+ // Cannot compute descriptors for keypoints on the image border.
+ KeyPointsFilter::runByImageBorder(keypoints, image.size(), BORDER_SIZE);
+
+ /// @todo Check 16-byte aligned
+ descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType<T>::type);
+
+ const int patchSide = RandomizedTree::PATCH_SIZE;
+ const int half = patchSide / 2;
+ for (size_t k = 0; k < keypoints.size(); ++k)
+ {
+ // Crop a patchSide x patchSide patch centered on the keypoint and classify it.
+ const cv::Point2f& center = keypoints[k].pt;
+ IplImage patch = image( Rect((int)(center.x - half), (int)(center.y - half), patchSide, patchSide) );
+ classifier_.getSignature( &patch, descriptors.ptr<T>((int)k) );
+ }
+}
+
+// read: intentionally a no-op — state comes from the classifier file, not cv storage.
+template<typename T>
+void CalonderDescriptorExtractor<T>::read( const FileNode& )
+{}
+
+// write: intentionally a no-op (matches the no-op read above).
+template<typename T>
+void CalonderDescriptorExtractor<T>::write( FileStorage& ) const
+{}
+
+// empty: the extractor is unusable until the classifier has loaded at least one tree.
+template<typename T>
+bool CalonderDescriptorExtractor<T>::empty() const
+{
+ return classifier_.trees_.empty();
+}
+
+
+////////////////////// Brute Force Matcher //////////////////////////
+
+// Backward-compatibility shim: the legacy BruteForceMatcher<Distance> forwards to
+// BFMatcher using Distance::normType; cross-checking is disabled.
+template<class Distance>
+class CV_EXPORTS BruteForceMatcher : public BFMatcher
+{
+public:
+ BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {(void)d;} // 'd' kept only for source compatibility
+ virtual ~BruteForceMatcher() {}
+};
+
+
+/****************************************************************************************\
+* Planar Object Detection *
+\****************************************************************************************/
+
+// Detector for a planar object trained from an image pyramid: keypoints are found
+// with an LDetector, classified with a FernClassifier, and a homography H relating
+// the model to the query image is estimated.
+class CV_EXPORTS PlanarObjectDetector
+{
+public:
+ PlanarObjectDetector();
+ // Constructs and reads the detector state from a file node (see read()).
+ PlanarObjectDetector(const FileNode& node);
+ // Constructs and trains from an image pyramid; parameters as in train() below.
+ PlanarObjectDetector(const vector<Mat>& pyr, int _npoints=300,
+ int _patchSize=FernClassifier::PATCH_SIZE,
+ int _nstructs=FernClassifier::DEFAULT_STRUCTS,
+ int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
+ int _nviews=FernClassifier::DEFAULT_VIEWS,
+ const LDetector& detector=LDetector(),
+ const PatchGenerator& patchGenerator=PatchGenerator());
+ virtual ~PlanarObjectDetector();
+ // Trains on a pyramid, selecting up to _npoints model keypoints with 'detector'.
+ virtual void train(const vector<Mat>& pyr, int _npoints=300,
+ int _patchSize=FernClassifier::PATCH_SIZE,
+ int _nstructs=FernClassifier::DEFAULT_STRUCTS,
+ int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
+ int _nviews=FernClassifier::DEFAULT_VIEWS,
+ const LDetector& detector=LDetector(),
+ const PatchGenerator& patchGenerator=PatchGenerator());
+ // Trains on a pyramid with externally supplied model keypoints.
+ virtual void train(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
+ int _patchSize=FernClassifier::PATCH_SIZE,
+ int _nstructs=FernClassifier::DEFAULT_STRUCTS,
+ int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
+ int _nviews=FernClassifier::DEFAULT_VIEWS,
+ const LDetector& detector=LDetector(),
+ const PatchGenerator& patchGenerator=PatchGenerator());
+ Rect getModelROI() const;
+ vector<KeyPoint> getModelPoints() const;
+ const LDetector& getDetector() const;
+ const FernClassifier& getClassifier() const;
+ void setVerbose(bool verbose);
+
+ void read(const FileNode& node);
+ void write(FileStorage& fs, const String& name=String()) const;
+ // Detection: returns true on success; H receives the estimated homography and
+ // 'corners' the projected model corners in the query image.
+ bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector<Point2f>& corners) const;
+ // As above on a precomputed pyramid and keypoints; 'pairs', when non-null,
+ // presumably receives matched model/query keypoint index pairs — confirm in the .cpp.
+ bool operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
+ CV_OUT Mat& H, CV_OUT vector<Point2f>& corners,
+ CV_OUT vector<int>* pairs=0) const;
+
+protected:
+ bool verbose; // enables progress/diagnostic output (see setVerbose)
+ Rect modelROI; // region of the model image covered by the object
+ vector<KeyPoint> modelPoints; // trained model keypoints
+ LDetector ldetector; // keypoint detector used for training/detection
+ FernClassifier fernClassifier; // patch classifier
+};
+
+}
+
+// 2009-01-12, Xavier Delacour <xavier.delacour@gmail.com>
+
+// Pair of hash values addressing a Locality-Sensitive-Hashing bucket.
+struct lsh_hash {
+ int h1, h2;
+};
+
+// Abstract storage backend for cvCreateLSH: the vector_* methods manage the stored
+// feature vectors, the hash_* methods manage the per-table (l) hash buckets.
+struct CvLSHOperations
+{
+ virtual ~CvLSHOperations() {}
+
+ // appends a vector, returning its index
+ virtual int vector_add(const void* data) = 0;
+ virtual void vector_remove(int i) = 0;
+ virtual const void* vector_lookup(int i) = 0;
+ virtual void vector_reserve(int n) = 0;
+ virtual unsigned int vector_count() = 0;
+
+ // h: bucket hash, l: table index, i: vector index
+ virtual void hash_insert(lsh_hash h, int l, int i) = 0;
+ virtual void hash_remove(lsh_hash h, int l, int i) = 0;
+ // writes up to ret_i_max indices into ret_i, returning the number found
+ virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0;
+};
+
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Splits color or grayscale image into multiple connected components
+ of nearly the same color/brightness using modification of Burt algorithm.
+ comp will contain a pointer to sequence (CvSeq)
+ of connected components (CvConnectedComp) */
+CVAPI(void) cvPyrSegmentation( IplImage* src, IplImage* dst,
+ CvMemStorage* storage, CvSeq** comp,
+ int level, double threshold1,
+ double threshold2 );
+
+/****************************************************************************************\
+* Planar subdivisions *
+\****************************************************************************************/
+
+/* Initializes Delaunay triangulation */
+CVAPI(void) cvInitSubdivDelaunay2D( CvSubdiv2D* subdiv, CvRect rect );
+
+/* Creates new subdivision */
+CVAPI(CvSubdiv2D*) cvCreateSubdiv2D( int subdiv_type, int header_size,
+ int vtx_size, int quadedge_size,
+ CvMemStorage* storage );
+
+/************************* high-level subdivision functions ***************************/
+
+/* Simplified Delaunay diagram creation */
+CV_INLINE CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
+{
+ /* Allocate an empty subdivision, then seed it with the bounding rectangle. */
+ CvSubdiv2D* s = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*s),
+ sizeof(CvSubdiv2DPoint), sizeof(CvQuadEdge2D), storage );
+ cvInitSubdivDelaunay2D( s, rect );
+ return s;
+}
+
+
+/* Inserts new point to the Delaunay triangulation */
+CVAPI(CvSubdiv2DPoint*) cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt);
+
+/* Locates a point within the Delaunay triangulation (finds the edge
+ the point is left to or belongs to, or the triangulation point the given
+ point coincides with) */
+CVAPI(CvSubdiv2DPointLocation) cvSubdiv2DLocate(
+ CvSubdiv2D* subdiv, CvPoint2D32f pt,
+ CvSubdiv2DEdge* edge,
+ CvSubdiv2DPoint** vertex CV_DEFAULT(NULL) );
+
+/* Calculates Voronoi tessellation (i.e. coordinates of Voronoi points) */
+CVAPI(void) cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv );
+
+
+/* Removes all Voronoi points from the tessellation */
+CVAPI(void) cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv );
+
+
+/* Finds the nearest to the given point vertex in subdivision. */
+CVAPI(CvSubdiv2DPoint*) cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt );
+
+
+/************ Basic quad-edge navigation and operations ************/
+
+/* Returns the next edge of 'edge' (delegates to CV_SUBDIV2D_NEXT_EDGE; in quad-edge
+ terms presumably the "onext" edge — see the macro definition). */
+CV_INLINE CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge )
+{
+ return CV_SUBDIV2D_NEXT_EDGE(edge);
+}
+
+
+CV_INLINE CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate )
+{
+ /* The low 2 bits select one of the 4 directed edges of a quad-edge; the remaining
+ bits identify the quad-edge itself. Rotation only changes the selector, mod 4. */
+ CvSubdiv2DEdge quad = edge & ~3;
+ CvSubdiv2DEdge selector = (edge + rotate) & 3;
+ return quad + selector;
+}
+
+CV_INLINE CvSubdiv2DEdge cvSubdiv2DSymEdge( CvSubdiv2DEdge edge )
+{
+ /* Flipping bit 1 of the 2-bit selector yields the oppositely-directed edge. */
+ return edge ^ 2;
+}
+
+/* Returns the edge related to 'edge' by 'type'. The low 2 bits of an edge value select
+ one of the 4 directed edges of a CvQuadEdge2D; the remaining bits are the quad-edge
+ pointer. The low nibble of 'type' rotates the selector before following next[], the
+ high nibble rotates the result. */
+CV_INLINE CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type )
+{
+ CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3);
+ edge = e->next[(edge + (int)type) & 3];
+ return (edge & ~3) + ((edge + ((int)type >> 4)) & 3);
+}
+
+
+/* Returns the origin vertex of 'edge': pt[] is indexed by the edge's 2-bit selector. */
+CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge )
+{
+ CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3);
+ return (CvSubdiv2DPoint*)e->pt[edge & 3];
+}
+
+
+/* Returns the destination vertex of 'edge': the selector offset by 2 (the symmetric
+ edge's origin). */
+CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge )
+{
+ CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3);
+ return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3];
+}
+
+/****************************************************************************************\
+* Additional operations on Subdivisions *
+\****************************************************************************************/
+
+// paints voronoi diagram: just demo function
+CVAPI(void) icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst );
+
+// checks planar subdivision for correctness. It is not an absolute check,
+// but it verifies some relations between quad-edges
+CVAPI(int) icvSubdiv2DCheck( CvSubdiv2D* subdiv );
+
+// returns squared distance between two 2D points with floating-point coordinates.
+CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 )
+{
+ /* The differences are formed in float (both operands are float), exactly as the
+ original code did, and only then widened to double for the squaring. */
+ float fdx = pt1.x - pt2.x;
+ float fdy = pt1.y - pt2.y;
+ double dx = fdx;
+ double dy = fdy;
+ return dx*dx + dy*dy;
+}
+
+
+
+
+/* 2D cross product of (b - a) and (c - a): twice the signed area of triangle abc. */
+CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c )
+{
+ double abx = (double)b.x - a.x;
+ double aby = (double)b.y - a.y;
+ double acx = (double)c.x - a.x;
+ double acy = (double)c.y - a.y;
+ return abx*acy - aby*acx;
+}
+
+
+/* Constructs kd-tree from set of feature descriptors */
+CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc);
+
+/* Constructs spill-tree from set of feature descriptors */
+CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data,
+ const int naive CV_DEFAULT(50),
+ const double rho CV_DEFAULT(.7),
+ const double tau CV_DEFAULT(.1) );
+
+/* Release feature tree */
+CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr);
+
+/* Searches feature tree for k nearest neighbors of given reference points,
+ searching (in case of kd-tree/bbf) at most emax leaves. */
+CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points,
+ CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20));
+
+/* Search feature tree for all points that are inlier to given rect region.
+ Only implemented for kd trees */
+CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr,
+ CvMat* bounds_min, CvMat* bounds_max,
+ CvMat* out_indices);
+
+
+/* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of
+ given type. Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */
+CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d,
+ int L CV_DEFAULT(10), int k CV_DEFAULT(10),
+ int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4),
+ int64 seed CV_DEFAULT(-1));
+
+/* Construct in-memory LSH table, with n bins. */
+CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10),
+ int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4),
+ int64 seed CV_DEFAULT(-1));
+
+/* Free the given LSH structure. */
+CVAPI(void) cvReleaseLSH(struct CvLSH** lsh);
+
+/* Return the number of vectors in the LSH. */
+CVAPI(unsigned int) LSHSize(struct CvLSH* lsh);
+
+/* Add vectors to the LSH structure, optionally returning indices. */
+CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0));
+
+/* Remove vectors from LSH, as addressed by given indices. */
+CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices);
+
+/* Query the LSH n times for at most k nearest points; data is n x d,
+ indices and dist are n x k. At most emax stored points will be accessed. */
+CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points,
+ CvMat* indices, CvMat* dist, int k, int emax);
+
+/* Kolmogorov-Zabih stereo-correspondence algorithm (a.k.a. KZ1) */
+#define CV_STEREO_GC_OCCLUDED SHRT_MAX
+
+/* State of the graph-cut stereo-correspondence algorithm (see CV_STEREO_GC_OCCLUDED
+ above); create/destroy with cvCreateStereoGCState / cvReleaseStereoGCState. */
+typedef struct CvStereoGCState
+{
+ int Ithreshold; /* intensity threshold — exact role defined in the implementation */
+ int interactionRadius;
+ float K, lambda, lambda1, lambda2; /* energy-term weights */
+ int occlusionCost;
+ int minDisparity;
+ int numberOfDisparities;
+ int maxIters;
+
+ /* working buffers (images, disparity maps, graph storage) */
+ CvMat* left;
+ CvMat* right;
+ CvMat* dispLeft;
+ CvMat* dispRight;
+ CvMat* ptrLeft;
+ CvMat* ptrRight;
+ CvMat* vtxBuf;
+ CvMat* edgeBuf;
+} CvStereoGCState;
+
+CVAPI(CvStereoGCState*) cvCreateStereoGCState( int numberOfDisparities, int maxIters );
+CVAPI(void) cvReleaseStereoGCState( CvStereoGCState** state );
+
+CVAPI(void) cvFindStereoCorrespondenceGC( const CvArr* left, const CvArr* right,
+ CvArr* disparityLeft, CvArr* disparityRight,
+ CvStereoGCState* state,
+ int useDisparityGuess CV_DEFAULT(0) );
+
+/* Calculates optical flow for 2 images using classical Lucas & Kanade algorithm */
+CVAPI(void) cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr,
+ CvSize win_size, CvArr* velx, CvArr* vely );
+
+/* Calculates optical flow for 2 images using block matching algorithm */
+CVAPI(void) cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr,
+ CvSize block_size, CvSize shift_size,
+ CvSize max_range, int use_previous,
+ CvArr* velx, CvArr* vely );
+
+/* Calculates Optical flow for 2 images using Horn & Schunck algorithm */
+CVAPI(void) cvCalcOpticalFlowHS( const CvArr* prev, const CvArr* curr,
+ int use_previous, CvArr* velx, CvArr* vely,
+ double lambda, CvTermCriteria criteria );
+
+
+/****************************************************************************************\
+* Background/foreground segmentation *
+\****************************************************************************************/
+
+/* We discriminate between foreground and background pixels
+ * by building and maintaining a model of the background.
+ * Any pixel which does not fit this model is then deemed
+ * to be foreground.
+ *
+ * At present we support two core background models,
+ * one of which has two variations:
+ *
+ * o CV_BG_MODEL_FGD: latest and greatest algorithm, described in
+ *
+ * Foreground Object Detection from Videos Containing Complex Background.
+ * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
+ * ACM MM2003 9p
+ *
+ * o CV_BG_MODEL_FGD_SIMPLE:
+ * A code comment describes this as a simplified version of the above,
+ * but the code is in fact currently identical
+ *
+ * o CV_BG_MODEL_MOG: "Mixture of Gaussians", older algorithm, described in
+ *
+ * Moving target classification and tracking from real-time video.
+ * A Lipton, H Fujijoshi, R Patil
+ * Proceedings IEEE Workshop on Application of Computer Vision pp 8-14 1998
+ *
+ * Learning patterns of activity using real-time tracking
+ * C Stauffer and W Grimson August 2000
+ * IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757
+ */
+
+
+#define CV_BG_MODEL_FGD 0
+#define CV_BG_MODEL_MOG 1 /* "Mixture of Gaussians". */
+#define CV_BG_MODEL_FGD_SIMPLE 2
+
+struct CvBGStatModel;
+
+typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model );
+typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model,
+ double learningRate );
+
+#define CV_BG_STAT_MODEL_FIELDS() \
+int type; /*type of BG model*/ \
+CvReleaseBGStatModel release; \
+CvUpdateBGStatModel update; \
+IplImage* background; /*8UC3 reference background image*/ \
+IplImage* foreground; /*8UC1 foreground image*/ \
+IplImage** layers; /*8UC3 reference background image, can be null */ \
+int layer_count; /* can be zero */ \
+CvMemStorage* storage; /*storage for foreground_regions*/ \
+CvSeq* foreground_regions /*foreground object contours*/
+
+/* Base background-model object; its layout is defined by CV_BG_STAT_MODEL_FIELDS()
+ so derived models (FGD, MOG) can be cast to CvBGStatModel*. */
+typedef struct CvBGStatModel
+{
+ CV_BG_STAT_MODEL_FIELDS();
+} CvBGStatModel;
+
+//
+
+// Releases memory used by BGStatModel
+CVAPI(void) cvReleaseBGStatModel( CvBGStatModel** bg_model );
+
+// Updates statistical model and returns number of found foreground regions
+CVAPI(int) cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel* bg_model,
+ double learningRate CV_DEFAULT(-1));
+
+// Performs FG post-processing using segmentation
+// (all pixels of a region will be classified as foreground if majority of pixels of the region are FG).
+// parameters:
+// segments - pointer to result of segmentation (for example MeanShiftSegmentation)
+// bg_model - pointer to CvBGStatModel structure
+CVAPI(void) cvRefineForegroundMaskBySegm( CvSeq* segments, CvBGStatModel* bg_model );
+
+/* Common use change detection function */
+CVAPI(int) cvChangeDetection( IplImage* prev_frame,
+ IplImage* curr_frame,
+ IplImage* change_mask );
+
+/*
+ Interface of ACM MM2003 algorithm
+ */
+
+/* Default parameters of foreground detection algorithm: */
+#define CV_BGFG_FGD_LC 128
+#define CV_BGFG_FGD_N1C 15
+#define CV_BGFG_FGD_N2C 25
+
+#define CV_BGFG_FGD_LCC 64
+#define CV_BGFG_FGD_N1CC 25
+#define CV_BGFG_FGD_N2CC 40
+
+/* Background reference image update parameter: */
+#define CV_BGFG_FGD_ALPHA_1 0.1f
+
+/* stat model update parameter
+ * 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG)
+ */
+#define CV_BGFG_FGD_ALPHA_2 0.005f
+
+/* start value for alpha parameter (to fast initiate statistic model) */
+#define CV_BGFG_FGD_ALPHA_3 0.1f
+
+#define CV_BGFG_FGD_DELTA 2
+
+#define CV_BGFG_FGD_T 0.9f
+
+#define CV_BGFG_FGD_MINAREA 15.f
+
+#define CV_BGFG_FGD_BG_UPDATE_TRESH 0.5f
+
+/* See the above-referenced Li/Huang/Gu/Tian paper
+ * for a full description of these background-model
+ * tuning parameters.
+ *
+ * Nomenclature: 'c' == "color", a three-component red/green/blue vector.
+ * We use histograms of these to model the range of
+ * colors we've seen at a given background pixel.
+ *
+ * 'cc' == "color co-occurrence", a six-component vector giving
+ * RGB color for both this frame and preceding frame.
+ * We use histograms of these to model the range of
+ * color CHANGES we've seen at a given background pixel.
+ */
+typedef struct CvFGDStatModelParams
+{
+ int Lc; /* Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. */
+ int N1c; /* Number of color vectors used to model normal background color variation at a given pixel. */
+ int N2c; /* Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. */
+ /* Used to allow the first N1c vectors to adapt over time to changing background. */
+
+ int Lcc; /* Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. */
+ int N1cc; /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel. */
+ int N2cc; /* Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. */
+ /* Used to allow the first N1cc vectors to adapt over time to changing background. */
+
+ int is_obj_without_holes;/* If TRUE we ignore holes within foreground blobs. Defaults to TRUE. */
+ int perform_morphing; /* Number of erode-dilate-erode foreground-blob cleanup iterations. */
+ /* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. */
+
+ float alpha1; /* How quickly we forget old background pixel values seen. Typically set to 0.1 */
+ float alpha2; /* "Controls speed of feature learning". Depends on T. Typical value circa 0.005. */
+ float alpha3; /* Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. */
+
+ float delta; /* Affects color and color co-occurrence quantization, typically set to 2. */
+ float T; /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9).*/
+ float minArea; /* Discard foreground blobs whose bounding box is smaller than this threshold. */
+} CvFGDStatModelParams;
+
+/* Per-pixel 'color' histogram entry of the FGD model (see the comment above). */
+typedef struct CvBGPixelCStatTable
+{
+ float Pv, Pvb; /* presumably P(v) and P(v|background) — see the referenced Li et al. paper */
+ uchar v[3]; /* three-component color vector */
+} CvBGPixelCStatTable;
+
+/* Per-pixel 'color co-occurrence' histogram entry: color of this frame and the
+ preceding one (see the comment above). */
+typedef struct CvBGPixelCCStatTable
+{
+ float Pv, Pvb; /* presumably P(v) and P(v|background) — see the referenced Li et al. paper */
+ uchar v[6]; /* six-component color co-occurrence vector */
+} CvBGPixelCCStatTable;
+
+/* Complete per-pixel statistics of the FGD model: color and color-co-occurrence
+ tables plus training flags. */
+typedef struct CvBGPixelStat
+{
+ float Pbc; /* presumably aggregate background probability over ctable — verify */
+ float Pbcc; /* presumably aggregate background probability over cctable — verify */
+ CvBGPixelCStatTable* ctable; /* 'color' histogram entries */
+ CvBGPixelCCStatTable* cctable; /* 'color co-occurrence' histogram entries */
+ uchar is_trained_st_model; /* static model trained flag */
+ uchar is_trained_dyn_model; /* dynamic model trained flag */
+} CvBGPixelStat;
+
+
+/* FGD background model (Li et al., ACM MM2003); begins with the common
+ CV_BG_STAT_MODEL_FIELDS() layout so it can be used as a CvBGStatModel. */
+typedef struct CvFGDStatModel
+{
+ CV_BG_STAT_MODEL_FIELDS();
+ CvBGPixelStat* pixel_stat; /* per-pixel statistics array */
+ IplImage* Ftd; /* presumably temporal-difference foreground mask — verify */
+ IplImage* Fbd; /* presumably background-difference foreground mask — verify */
+ IplImage* prev_frame; /* previous frame, needed for co-occurrence vectors */
+ CvFGDStatModelParams params;
+} CvFGDStatModel;
+
+/* Creates FGD model */
+CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame,
+ CvFGDStatModelParams* parameters CV_DEFAULT(NULL));
+
+/*
+ Interface of Gaussian mixture algorithm
+
+ "An improved adaptive background mixture model for real-time tracking with shadow detection"
+ P. KadewTraKuPong and R. Bowden,
+ Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+ */
+
+/* Note: "MOG" == "Mixture Of Gaussians": */
+
+#define CV_BGFG_MOG_MAX_NGAUSSIANS 500
+
+/* default parameters of gaussian background detection algorithm */
+#define CV_BGFG_MOG_BACKGROUND_THRESHOLD 0.7 /* threshold sum of weights for background test */
+#define CV_BGFG_MOG_STD_THRESHOLD 2.5 /* lambda=2.5 is 99% */
+#define CV_BGFG_MOG_WINDOW_SIZE 200 /* Learning rate; alpha = 1/CV_GBG_WINDOW_SIZE */
+#define CV_BGFG_MOG_NGAUSSIANS 5 /* = K = number of Gaussians in mixture */
+#define CV_BGFG_MOG_WEIGHT_INIT 0.05
+#define CV_BGFG_MOG_SIGMA_INIT 30
+#define CV_BGFG_MOG_MINAREA 15.f
+
+
+#define CV_BGFG_MOG_NCOLORS 3
+
+/* Tuning parameters of the MOG model; defaults are the CV_BGFG_MOG_* macros above. */
+typedef struct CvGaussBGStatModelParams
+{
+ int win_size; /* = 1/alpha */
+ int n_gauss; /* number of Gaussians per pixel (K) */
+ double bg_threshold, std_threshold, minArea;
+ double weight_init, variance_init; /* initial weight/variance of a new Gaussian */
+}CvGaussBGStatModelParams;
+
+/* One Gaussian of a per-pixel mixture: weight plus per-channel mean/variance. */
+typedef struct CvGaussBGValues
+{
+ int match_sum; /* presumably the number of frames this Gaussian matched — verify */
+ double weight;
+ double variance[CV_BGFG_MOG_NCOLORS];
+ double mean[CV_BGFG_MOG_NCOLORS];
+} CvGaussBGValues;
+
+/* Per-pixel mixture: g_values points to that pixel's Gaussians (presumably n_gauss
+ entries — verify against the implementation). */
+typedef struct CvGaussBGPoint
+{
+ CvGaussBGValues* g_values;
+} CvGaussBGPoint;
+
+
+/* MOG background model; begins with the common CV_BG_STAT_MODEL_FIELDS() layout
+ so it can be used as a CvBGStatModel. */
+typedef struct CvGaussBGModel
+{
+ CV_BG_STAT_MODEL_FIELDS();
+ CvGaussBGStatModelParams params;
+ CvGaussBGPoint* g_point; /* per-pixel Gaussian mixtures */
+ int countFrames; /* number of frames processed so far */
+ void* mog; /* opaque implementation handle */
+} CvGaussBGModel;
+
+
+/* Creates Gaussian mixture background model */
+CVAPI(CvBGStatModel*) cvCreateGaussianBGModel( IplImage* first_frame,
+ CvGaussBGStatModelParams* parameters CV_DEFAULT(NULL));
+
+
+/* One codeword of a per-pixel codebook: linked-list node carrying per-channel
+ bounding and learning ranges. */
+typedef struct CvBGCodeBookElem
+{
+ struct CvBGCodeBookElem* next;
+ int tLastUpdate; /* time (t) of the last update of this codeword */
+ int stale; /* staleness counter, used by cvBGCodeBookClearStale */
+ uchar boxMin[3]; /* per-channel bounds used for background testing */
+ uchar boxMax[3];
+ uchar learnMin[3]; /* per-channel bounds used during learning */
+ uchar learnMax[3];
+} CvBGCodeBookElem;
+
+/* Codebook background model: one codeword list per pixel (cbmap), updated by
+ cvBGCodeBookUpdate and tested by cvBGCodeBookDiff. */
+typedef struct CvBGCodeBookModel
+{
+ CvSize size; /* frame size */
+ int t; /* update counter */
+ uchar cbBounds[3]; /* per-channel learning bounds */
+ uchar modMin[3]; /* per-channel tolerances used when testing pixels */
+ uchar modMax[3];
+ CvBGCodeBookElem** cbmap; /* per-pixel codeword lists */
+ CvMemStorage* storage; /* storage backing the codeword elements */
+ CvBGCodeBookElem* freeList; /* recycled codeword elements */
+} CvBGCodeBookModel;
+
+CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void );
+CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );
+
+CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
+ CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
+ const CvArr* mask CV_DEFAULT(0) );
+
+CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image,
+ CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) );
+
+CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh,
+ CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
+ const CvArr* mask CV_DEFAULT(0) );
+
+CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1),
+ float perimScale CV_DEFAULT(4.f),
+ CvMemStorage* storage CV_DEFAULT(0),
+ CvPoint offset CV_DEFAULT(cvPoint(0,0)));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp
new file mode 100644
index 00000000..e164bf4a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/legacy/streams.hpp
@@ -0,0 +1,92 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CVSTREAMS_H__
+#define __OPENCV_CVSTREAMS_H__
+
+#ifdef WIN32
+#include <streams.h> /* !!! IF YOU'VE GOT AN ERROR HERE, PLEASE READ BELOW !!! */
+/***************** How to get Visual Studio understand streams.h ****************\
+
+You need DirectShow SDK that is now a part of Platform SDK
+(Windows Server 2003 SP1 SDK or later),
+and DirectX SDK (2006 April or later).
+
+1. Download the Platform SDK from
+ http://www.microsoft.com/msdownload/platformsdk/sdkupdate/
+ and DirectX SDK from msdn.microsoft.com/directx/
+ (They are huge, but you can download it by parts).
+ If it doesn't work for you, consider HighGUI that can capture video via VFW or MIL
+
+2. Install Platform SDK together with DirectShow SDK.
+ Install DirectX (with or without sample code).
+
+3. Build baseclasses.
+ See <PlatformSDKInstallFolder>\samples\multimedia\directshow\readme.txt.
+
+4. Copy the built libraries (called strmbase.lib and strmbasd.lib
+ in Release and Debug versions, respectively) to
+ <PlatformSDKInstallFolder>\lib.
+
+5. In Developer Studio add the following paths:
+ <DirectXSDKInstallFolder>\include
+ <PlatformSDKInstallFolder>\include
+ <PlatformSDKInstallFolder>\samples\multimedia\directshow\baseclasses
+ to the includes' search path
+ (at Tools->Options->Directories->Include files in case of Visual Studio 6.0,
+ at Tools->Options->Projects and Solutions->VC++ Directories->Include files in case
+ of Visual Studio 2005)
+ Add
+ <DirectXSDKInstallFolder>\lib
+ <PlatformSDKInstallFolder>\lib
+ to the libraries' search path (in the same dialog, ...->"Library files" page)
+
+ NOTE: PUT THE ADDED LINES ON THE VERY TOP OF THE LISTS, OTHERWISE YOU MAY STILL GET
+ COMPILER OR LINKER ERRORS. This is necessary, because Visual Studio
+ may include older versions of the same headers and libraries.
+
+6. Now you can build OpenCV DirectShow filters.
+
+\***********************************************************************************/
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp
new file mode 100644
index 00000000..dc62dcb0
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml.hpp
@@ -0,0 +1,41 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/ml/ml.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp
new file mode 100644
index 00000000..d86ecde4
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ml/ml.hpp
@@ -0,0 +1,2147 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_ML_HPP__
+#define __OPENCV_ML_HPP__
+
+#include "opencv2/core/core.hpp"
+#include <limits.h>
+
+#ifdef __cplusplus
+
+#include <map>
+#include <string>
+#include <iostream>
+
+// Apple defines a check() macro somewhere in the debug headers
+// that interferes with a method definiton in this header
+#undef check
+
+/****************************************************************************************\
+* Main struct definitions *
+\****************************************************************************************/
+
+/* log(2*PI) */
+#define CV_LOG2PI (1.8378770664093454835606594728112)
+
+/* columns of <trainData> matrix are training samples */
+#define CV_COL_SAMPLE 0
+
+/* rows of <trainData> matrix are training samples */
+#define CV_ROW_SAMPLE 1
+
+#define CV_IS_ROW_SAMPLE(flags) ((flags) & CV_ROW_SAMPLE)
+
+struct CvVectors
+{
+ int type;
+ int dims, count;
+ CvVectors* next;
+ union
+ {
+ uchar** ptr;
+ float** fl;
+ double** db;
+ } data;
+};
+
+#if 0
+/* A structure, representing the lattice range of statmodel parameters.
+ It is used for optimizing statmodel parameters by cross-validation method.
+ The lattice is logarithmic, so <step> must be greater then 1. */
+typedef struct CvParamLattice
+{
+ double min_val;
+ double max_val;
+ double step;
+}
+CvParamLattice;
+
+CV_INLINE CvParamLattice cvParamLattice( double min_val, double max_val,
+ double log_step )
+{
+ CvParamLattice pl;
+ pl.min_val = MIN( min_val, max_val );
+ pl.max_val = MAX( min_val, max_val );
+ pl.step = MAX( log_step, 1. );
+ return pl;
+}
+
+CV_INLINE CvParamLattice cvDefaultParamLattice( void )
+{
+ CvParamLattice pl = {0,0,0};
+ return pl;
+}
+#endif
+
+/* Variable type */
+#define CV_VAR_NUMERICAL 0
+#define CV_VAR_ORDERED 0
+#define CV_VAR_CATEGORICAL 1
+
+#define CV_TYPE_NAME_ML_SVM "opencv-ml-svm"
+#define CV_TYPE_NAME_ML_KNN "opencv-ml-knn"
+#define CV_TYPE_NAME_ML_NBAYES "opencv-ml-bayesian"
+#define CV_TYPE_NAME_ML_EM "opencv-ml-em"
+#define CV_TYPE_NAME_ML_BOOSTING "opencv-ml-boost-tree"
+#define CV_TYPE_NAME_ML_TREE "opencv-ml-tree"
+#define CV_TYPE_NAME_ML_ANN_MLP "opencv-ml-ann-mlp"
+#define CV_TYPE_NAME_ML_CNN "opencv-ml-cnn"
+#define CV_TYPE_NAME_ML_RTREES "opencv-ml-random-trees"
+#define CV_TYPE_NAME_ML_ERTREES "opencv-ml-extremely-randomized-trees"
+#define CV_TYPE_NAME_ML_GBT "opencv-ml-gradient-boosting-trees"
+
+#define CV_TRAIN_ERROR 0
+#define CV_TEST_ERROR 1
+
+class CV_EXPORTS_W CvStatModel
+{
+public:
+ CvStatModel();
+ virtual ~CvStatModel();
+
+ virtual void clear();
+
+ CV_WRAP virtual void save( const char* filename, const char* name=0 ) const;
+ CV_WRAP virtual void load( const char* filename, const char* name=0 );
+
+ virtual void write( CvFileStorage* storage, const char* name ) const;
+ virtual void read( CvFileStorage* storage, CvFileNode* node );
+
+protected:
+ const char* default_model_name;
+};
+
+/****************************************************************************************\
+* Normal Bayes Classifier *
+\****************************************************************************************/
+
+/* The structure, representing the grid range of statmodel parameters.
+ It is used for optimizing statmodel accuracy by varying model parameters,
+ the accuracy estimate being computed by cross-validation.
+ The grid is logarithmic, so <step> must be greater then 1. */
+
+class CvMLData;
+
+struct CV_EXPORTS_W_MAP CvParamGrid
+{
+ // SVM params type
+ enum { SVM_C=0, SVM_GAMMA=1, SVM_P=2, SVM_NU=3, SVM_COEF=4, SVM_DEGREE=5 };
+
+ CvParamGrid()
+ {
+ min_val = max_val = step = 0;
+ }
+
+ CvParamGrid( double min_val, double max_val, double log_step );
+ //CvParamGrid( int param_id );
+ bool check() const;
+
+ CV_PROP_RW double min_val;
+ CV_PROP_RW double max_val;
+ CV_PROP_RW double step;
+};
+
+inline CvParamGrid::CvParamGrid( double _min_val, double _max_val, double _log_step )
+{
+ min_val = _min_val;
+ max_val = _max_val;
+ step = _log_step;
+}
+
+class CV_EXPORTS_W CvNormalBayesClassifier : public CvStatModel
+{
+public:
+ CV_WRAP CvNormalBayesClassifier();
+ virtual ~CvNormalBayesClassifier();
+
+ CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses,
+ const CvMat* varIdx=0, const CvMat* sampleIdx=0 );
+
+ virtual bool train( const CvMat* trainData, const CvMat* responses,
+ const CvMat* varIdx = 0, const CvMat* sampleIdx=0, bool update=false );
+
+ virtual float predict( const CvMat* samples, CV_OUT CvMat* results=0 ) const;
+ CV_WRAP virtual void clear();
+
+ CV_WRAP CvNormalBayesClassifier( const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat() );
+ CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& varIdx = cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(),
+ bool update=false );
+ CV_WRAP virtual float predict( const cv::Mat& samples, CV_OUT cv::Mat* results=0 ) const;
+
+ virtual void write( CvFileStorage* storage, const char* name ) const;
+ virtual void read( CvFileStorage* storage, CvFileNode* node );
+
+protected:
+ int var_count, var_all;
+ CvMat* var_idx;
+ CvMat* cls_labels;
+ CvMat** count;
+ CvMat** sum;
+ CvMat** productsum;
+ CvMat** avg;
+ CvMat** inv_eigen_values;
+ CvMat** cov_rotate_mats;
+ CvMat* c;
+};
+
+
+/****************************************************************************************\
+* K-Nearest Neighbour Classifier *
+\****************************************************************************************/
+
+// k Nearest Neighbors
+class CV_EXPORTS_W CvKNearest : public CvStatModel
+{
+public:
+
+ CV_WRAP CvKNearest();
+ virtual ~CvKNearest();
+
+ CvKNearest( const CvMat* trainData, const CvMat* responses,
+ const CvMat* sampleIdx=0, bool isRegression=false, int max_k=32 );
+
+ virtual bool train( const CvMat* trainData, const CvMat* responses,
+ const CvMat* sampleIdx=0, bool is_regression=false,
+ int maxK=32, bool updateBase=false );
+
+ virtual float find_nearest( const CvMat* samples, int k, CV_OUT CvMat* results=0,
+ const float** neighbors=0, CV_OUT CvMat* neighborResponses=0, CV_OUT CvMat* dist=0 ) const;
+
+ CV_WRAP CvKNearest( const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, int max_k=32 );
+
+ CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false,
+ int maxK=32, bool updateBase=false );
+
+ virtual float find_nearest( const cv::Mat& samples, int k, cv::Mat* results=0,
+ const float** neighbors=0, cv::Mat* neighborResponses=0,
+ cv::Mat* dist=0 ) const;
+ CV_WRAP virtual float find_nearest( const cv::Mat& samples, int k, CV_OUT cv::Mat& results,
+ CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const;
+
+ virtual void clear();
+ int get_max_k() const;
+ int get_var_count() const;
+ int get_sample_count() const;
+ bool is_regression() const;
+
+ virtual float write_results( int k, int k1, int start, int end,
+ const float* neighbor_responses, const float* dist, CvMat* _results,
+ CvMat* _neighbor_responses, CvMat* _dist, Cv32suf* sort_buf ) const;
+
+ virtual void find_neighbors_direct( const CvMat* _samples, int k, int start, int end,
+ float* neighbor_responses, const float** neighbors, float* dist ) const;
+
+protected:
+
+ int max_k, var_count;
+ int total;
+ bool regression;
+ CvVectors* samples;
+};
+
+/****************************************************************************************\
+* Support Vector Machines *
+\****************************************************************************************/
+
+// SVM training parameters
+struct CV_EXPORTS_W_MAP CvSVMParams
+{
+ CvSVMParams();
+ CvSVMParams( int svm_type, int kernel_type,
+ double degree, double gamma, double coef0,
+ double Cvalue, double nu, double p,
+ CvMat* class_weights, CvTermCriteria term_crit );
+
+ CV_PROP_RW int svm_type;
+ CV_PROP_RW int kernel_type;
+ CV_PROP_RW double degree; // for poly
+ CV_PROP_RW double gamma; // for poly/rbf/sigmoid
+ CV_PROP_RW double coef0; // for poly/sigmoid
+
+ CV_PROP_RW double C; // for CV_SVM_C_SVC, CV_SVM_EPS_SVR and CV_SVM_NU_SVR
+ CV_PROP_RW double nu; // for CV_SVM_NU_SVC, CV_SVM_ONE_CLASS, and CV_SVM_NU_SVR
+ CV_PROP_RW double p; // for CV_SVM_EPS_SVR
+ CvMat* class_weights; // for CV_SVM_C_SVC
+ CV_PROP_RW CvTermCriteria term_crit; // termination criteria
+};
+
+
+struct CV_EXPORTS CvSVMKernel
+{
+ typedef void (CvSVMKernel::*Calc)( int vec_count, int vec_size, const float** vecs,
+ const float* another, float* results );
+ CvSVMKernel();
+ CvSVMKernel( const CvSVMParams* params, Calc _calc_func );
+ virtual bool create( const CvSVMParams* params, Calc _calc_func );
+ virtual ~CvSVMKernel();
+
+ virtual void clear();
+ virtual void calc( int vcount, int n, const float** vecs, const float* another, float* results );
+
+ const CvSVMParams* params;
+ Calc calc_func;
+
+ virtual void calc_non_rbf_base( int vec_count, int vec_size, const float** vecs,
+ const float* another, float* results,
+ double alpha, double beta );
+
+ virtual void calc_linear( int vec_count, int vec_size, const float** vecs,
+ const float* another, float* results );
+ virtual void calc_rbf( int vec_count, int vec_size, const float** vecs,
+ const float* another, float* results );
+ virtual void calc_poly( int vec_count, int vec_size, const float** vecs,
+ const float* another, float* results );
+ virtual void calc_sigmoid( int vec_count, int vec_size, const float** vecs,
+ const float* another, float* results );
+};
+
+
+struct CvSVMKernelRow
+{
+ CvSVMKernelRow* prev;
+ CvSVMKernelRow* next;
+ float* data;
+};
+
+
+struct CvSVMSolutionInfo
+{
+ double obj;
+ double rho;
+ double upper_bound_p;
+ double upper_bound_n;
+ double r; // for Solver_NU
+};
+
+class CV_EXPORTS CvSVMSolver
+{
+public:
+ typedef bool (CvSVMSolver::*SelectWorkingSet)( int& i, int& j );
+ typedef float* (CvSVMSolver::*GetRow)( int i, float* row, float* dst, bool existed );
+ typedef void (CvSVMSolver::*CalcRho)( double& rho, double& r );
+
+ CvSVMSolver();
+
+ CvSVMSolver( int count, int var_count, const float** samples, schar* y,
+ int alpha_count, double* alpha, double Cp, double Cn,
+ CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row,
+ SelectWorkingSet select_working_set, CalcRho calc_rho );
+ virtual bool create( int count, int var_count, const float** samples, schar* y,
+ int alpha_count, double* alpha, double Cp, double Cn,
+ CvMemStorage* storage, CvSVMKernel* kernel, GetRow get_row,
+ SelectWorkingSet select_working_set, CalcRho calc_rho );
+ virtual ~CvSVMSolver();
+
+ virtual void clear();
+ virtual bool solve_generic( CvSVMSolutionInfo& si );
+
+ virtual bool solve_c_svc( int count, int var_count, const float** samples, schar* y,
+ double Cp, double Cn, CvMemStorage* storage,
+ CvSVMKernel* kernel, double* alpha, CvSVMSolutionInfo& si );
+ virtual bool solve_nu_svc( int count, int var_count, const float** samples, schar* y,
+ CvMemStorage* storage, CvSVMKernel* kernel,
+ double* alpha, CvSVMSolutionInfo& si );
+ virtual bool solve_one_class( int count, int var_count, const float** samples,
+ CvMemStorage* storage, CvSVMKernel* kernel,
+ double* alpha, CvSVMSolutionInfo& si );
+
+ virtual bool solve_eps_svr( int count, int var_count, const float** samples, const float* y,
+ CvMemStorage* storage, CvSVMKernel* kernel,
+ double* alpha, CvSVMSolutionInfo& si );
+
+ virtual bool solve_nu_svr( int count, int var_count, const float** samples, const float* y,
+ CvMemStorage* storage, CvSVMKernel* kernel,
+ double* alpha, CvSVMSolutionInfo& si );
+
+ virtual float* get_row_base( int i, bool* _existed );
+ virtual float* get_row( int i, float* dst );
+
+ int sample_count;
+ int var_count;
+ int cache_size;
+ int cache_line_size;
+ const float** samples;
+ const CvSVMParams* params;
+ CvMemStorage* storage;
+ CvSVMKernelRow lru_list;
+ CvSVMKernelRow* rows;
+
+ int alpha_count;
+
+ double* G;
+ double* alpha;
+
+ // -1 - lower bound, 0 - free, 1 - upper bound
+ schar* alpha_status;
+
+ schar* y;
+ double* b;
+ float* buf[2];
+ double eps;
+ int max_iter;
+ double C[2]; // C[0] == Cn, C[1] == Cp
+ CvSVMKernel* kernel;
+
+ SelectWorkingSet select_working_set_func;
+ CalcRho calc_rho_func;
+ GetRow get_row_func;
+
+ virtual bool select_working_set( int& i, int& j );
+ virtual bool select_working_set_nu_svm( int& i, int& j );
+ virtual void calc_rho( double& rho, double& r );
+ virtual void calc_rho_nu_svm( double& rho, double& r );
+
+ virtual float* get_row_svc( int i, float* row, float* dst, bool existed );
+ virtual float* get_row_one_class( int i, float* row, float* dst, bool existed );
+ virtual float* get_row_svr( int i, float* row, float* dst, bool existed );
+};
+
+
+struct CvSVMDecisionFunc
+{
+ double rho;
+ int sv_count;
+ double* alpha;
+ int* sv_index;
+};
+
+
+// SVM model
+class CV_EXPORTS_W CvSVM : public CvStatModel
+{
+public:
+ // SVM type
+ enum { C_SVC=100, NU_SVC=101, ONE_CLASS=102, EPS_SVR=103, NU_SVR=104 };
+
+ // SVM kernel type
+ enum { LINEAR=0, POLY=1, RBF=2, SIGMOID=3 };
+
+ // SVM params type
+ enum { C=0, GAMMA=1, P=2, NU=3, COEF=4, DEGREE=5 };
+
+ CV_WRAP CvSVM();
+ virtual ~CvSVM();
+
+ CvSVM( const CvMat* trainData, const CvMat* responses,
+ const CvMat* varIdx=0, const CvMat* sampleIdx=0,
+ CvSVMParams params=CvSVMParams() );
+
+ virtual bool train( const CvMat* trainData, const CvMat* responses,
+ const CvMat* varIdx=0, const CvMat* sampleIdx=0,
+ CvSVMParams params=CvSVMParams() );
+
+ virtual bool train_auto( const CvMat* trainData, const CvMat* responses,
+ const CvMat* varIdx, const CvMat* sampleIdx, CvSVMParams params,
+ int kfold = 10,
+ CvParamGrid Cgrid = get_default_grid(CvSVM::C),
+ CvParamGrid gammaGrid = get_default_grid(CvSVM::GAMMA),
+ CvParamGrid pGrid = get_default_grid(CvSVM::P),
+ CvParamGrid nuGrid = get_default_grid(CvSVM::NU),
+ CvParamGrid coeffGrid = get_default_grid(CvSVM::COEF),
+ CvParamGrid degreeGrid = get_default_grid(CvSVM::DEGREE),
+ bool balanced=false );
+
+ virtual float predict( const CvMat* sample, bool returnDFVal=false ) const;
+ virtual float predict( const CvMat* samples, CV_OUT CvMat* results ) const;
+
+ CV_WRAP CvSVM( const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(),
+ CvSVMParams params=CvSVMParams() );
+
+ CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(),
+ CvSVMParams params=CvSVMParams() );
+
+ CV_WRAP virtual bool train_auto( const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& varIdx, const cv::Mat& sampleIdx, CvSVMParams params,
+ int k_fold = 10,
+ CvParamGrid Cgrid = CvSVM::get_default_grid(CvSVM::C),
+ CvParamGrid gammaGrid = CvSVM::get_default_grid(CvSVM::GAMMA),
+ CvParamGrid pGrid = CvSVM::get_default_grid(CvSVM::P),
+ CvParamGrid nuGrid = CvSVM::get_default_grid(CvSVM::NU),
+ CvParamGrid coeffGrid = CvSVM::get_default_grid(CvSVM::COEF),
+ CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE),
+ bool balanced=false);
+ CV_WRAP virtual float predict( const cv::Mat& sample, bool returnDFVal=false ) const;
+ CV_WRAP_AS(predict_all) void predict( cv::InputArray samples, cv::OutputArray results ) const;
+
+ CV_WRAP virtual int get_support_vector_count() const;
+ virtual const float* get_support_vector(int i) const;
+ virtual CvSVMParams get_params() const { return params; };
+ CV_WRAP virtual void clear();
+
+ static CvParamGrid get_default_grid( int param_id );
+
+ virtual void write( CvFileStorage* storage, const char* name ) const;
+ virtual void read( CvFileStorage* storage, CvFileNode* node );
+ CV_WRAP int get_var_count() const { return var_idx ? var_idx->cols : var_all; }
+
+protected:
+
+ virtual bool set_params( const CvSVMParams& params );
+ virtual bool train1( int sample_count, int var_count, const float** samples,
+ const void* responses, double Cp, double Cn,
+ CvMemStorage* _storage, double* alpha, double& rho );
+ virtual bool do_train( int svm_type, int sample_count, int var_count, const float** samples,
+ const CvMat* responses, CvMemStorage* _storage, double* alpha );
+ virtual void create_kernel();
+ virtual void create_solver();
+
+ virtual float predict( const float* row_sample, int row_len, bool returnDFVal=false ) const;
+
+ virtual void write_params( CvFileStorage* fs ) const;
+ virtual void read_params( CvFileStorage* fs, CvFileNode* node );
+
+ void optimize_linear_svm();
+
+ CvSVMParams params;
+ CvMat* class_labels;
+ int var_all;
+ float** sv;
+ int sv_total;
+ CvMat* var_idx;
+ CvMat* class_weights;
+ CvSVMDecisionFunc* decision_func;
+ CvMemStorage* storage;
+
+ CvSVMSolver* solver;
+ CvSVMKernel* kernel;
+
+private:
+ CvSVM(const CvSVM&);
+ CvSVM& operator = (const CvSVM&);
+};
+
+/****************************************************************************************\
+* Expectation - Maximization *
+\****************************************************************************************/
+namespace cv
+{
+class CV_EXPORTS_W EM : public Algorithm
+{
+public:
+ // Type of covariation matrices
+ enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL};
+
+ // Default parameters
+ enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
+
+ // The initial step
+ enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
+
+ CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
+ const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
+ EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
+
+ virtual ~EM();
+ CV_WRAP virtual void clear();
+
+ CV_WRAP virtual bool train(InputArray samples,
+ OutputArray logLikelihoods=noArray(),
+ OutputArray labels=noArray(),
+ OutputArray probs=noArray());
+
+ CV_WRAP virtual bool trainE(InputArray samples,
+ InputArray means0,
+ InputArray covs0=noArray(),
+ InputArray weights0=noArray(),
+ OutputArray logLikelihoods=noArray(),
+ OutputArray labels=noArray(),
+ OutputArray probs=noArray());
+
+ CV_WRAP virtual bool trainM(InputArray samples,
+ InputArray probs0,
+ OutputArray logLikelihoods=noArray(),
+ OutputArray labels=noArray(),
+ OutputArray probs=noArray());
+
+ CV_WRAP Vec2d predict(InputArray sample,
+ OutputArray probs=noArray()) const;
+
+ CV_WRAP bool isTrained() const;
+
+ AlgorithmInfo* info() const;
+ virtual void read(const FileNode& fn);
+
+protected:
+
+ virtual void setTrainData(int startStep, const Mat& samples,
+ const Mat* probs0,
+ const Mat* means0,
+ const vector<Mat>* covs0,
+ const Mat* weights0);
+
+ bool doTrain(int startStep,
+ OutputArray logLikelihoods,
+ OutputArray labels,
+ OutputArray probs);
+ virtual void eStep();
+ virtual void mStep();
+
+ void clusterTrainSamples();
+ void decomposeCovs();
+ void computeLogWeightDivDet();
+
+ Vec2d computeProbabilities(const Mat& sample, Mat* probs) const;
+
+ // all inner matrices have type CV_64FC1
+ CV_PROP_RW int nclusters;
+ CV_PROP_RW int covMatType;
+ CV_PROP_RW int maxIters;
+ CV_PROP_RW double epsilon;
+
+ Mat trainSamples;
+ Mat trainProbs;
+ Mat trainLogLikelihoods;
+ Mat trainLabels;
+
+ CV_PROP Mat weights;
+ CV_PROP Mat means;
+ CV_PROP vector<Mat> covs;
+
+ vector<Mat> covsEigenValues;
+ vector<Mat> covsRotateMats;
+ vector<Mat> invCovsEigenValues;
+ Mat logWeightDivDet;
+};
+} // namespace cv
+
+/****************************************************************************************\
+* Decision Tree *
+\****************************************************************************************/\
+struct CvPair16u32s
+{
+ unsigned short* u;
+ int* i;
+};
+
+
+#define CV_DTREE_CAT_DIR(idx,subset) \
+ (2*((subset[(idx)>>5]&(1 << ((idx) & 31)))==0)-1)
+
+struct CvDTreeSplit
+{
+ int var_idx;
+ int condensed_idx;
+ int inversed;
+ float quality;
+ CvDTreeSplit* next;
+ union
+ {
+ int subset[2];
+ struct
+ {
+ float c;
+ int split_point;
+ }
+ ord;
+ };
+};
+
+struct CvDTreeNode
+{
+ int class_idx;
+ int Tn;
+ double value;
+
+ CvDTreeNode* parent;
+ CvDTreeNode* left;
+ CvDTreeNode* right;
+
+ CvDTreeSplit* split;
+
+ int sample_count;
+ int depth;
+ int* num_valid;
+ int offset;
+ int buf_idx;
+ double maxlr;
+
+ // global pruning data
+ int complexity;
+ double alpha;
+ double node_risk, tree_risk, tree_error;
+
+ // cross-validation pruning data
+ int* cv_Tn;
+ double* cv_node_risk;
+ double* cv_node_error;
+
+ int get_num_valid(int vi) { return num_valid ? num_valid[vi] : sample_count; }
+ void set_num_valid(int vi, int n) { if( num_valid ) num_valid[vi] = n; }
+};
+
+
+struct CV_EXPORTS_W_MAP CvDTreeParams
+{
+ CV_PROP_RW int max_categories;
+ CV_PROP_RW int max_depth;
+ CV_PROP_RW int min_sample_count;
+ CV_PROP_RW int cv_folds;
+ CV_PROP_RW bool use_surrogates;
+ CV_PROP_RW bool use_1se_rule;
+ CV_PROP_RW bool truncate_pruned_tree;
+ CV_PROP_RW float regression_accuracy;
+ const float* priors;
+
+ CvDTreeParams();
+ CvDTreeParams( int max_depth, int min_sample_count,
+ float regression_accuracy, bool use_surrogates,
+ int max_categories, int cv_folds,
+ bool use_1se_rule, bool truncate_pruned_tree,
+ const float* priors );
+};
+
+
+struct CV_EXPORTS CvDTreeTrainData
+{
+ CvDTreeTrainData();
+ CvDTreeTrainData( const CvMat* trainData, int tflag,
+ const CvMat* responses, const CvMat* varIdx=0,
+ const CvMat* sampleIdx=0, const CvMat* varType=0,
+ const CvMat* missingDataMask=0,
+ const CvDTreeParams& params=CvDTreeParams(),
+ bool _shared=false, bool _add_labels=false );
+ virtual ~CvDTreeTrainData();
+
+ virtual void set_data( const CvMat* trainData, int tflag,
+ const CvMat* responses, const CvMat* varIdx=0,
+ const CvMat* sampleIdx=0, const CvMat* varType=0,
+ const CvMat* missingDataMask=0,
+ const CvDTreeParams& params=CvDTreeParams(),
+ bool _shared=false, bool _add_labels=false,
+ bool _update_data=false );
+ virtual void do_responses_copy();
+
+ virtual void get_vectors( const CvMat* _subsample_idx,
+ float* values, uchar* missing, float* responses, bool get_class_idx=false );
+
+ virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx );
+
+ virtual void write_params( CvFileStorage* fs ) const;
+ virtual void read_params( CvFileStorage* fs, CvFileNode* node );
+
+ // release all the data
+ virtual void clear();
+
+ int get_num_classes() const;
+ int get_var_type(int vi) const;
+ int get_work_var_count() const {return work_var_count;}
+
+ virtual const float* get_ord_responses( CvDTreeNode* n, float* values_buf, int* sample_indices_buf );
+ virtual const int* get_class_labels( CvDTreeNode* n, int* labels_buf );
+ virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf );
+ virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf );
+ virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf );
+ virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* sorted_indices_buf,
+ const float** ord_values, const int** sorted_indices, int* sample_indices_buf );
+ virtual int get_child_buf_idx( CvDTreeNode* n );
+
+ ////////////////////////////////////
+
+ virtual bool set_params( const CvDTreeParams& params );
+ virtual CvDTreeNode* new_node( CvDTreeNode* parent, int count,
+ int storage_idx, int offset );
+
+ virtual CvDTreeSplit* new_split_ord( int vi, float cmp_val,
+ int split_point, int inversed, float quality );
+ virtual CvDTreeSplit* new_split_cat( int vi, float quality );
+ virtual void free_node_data( CvDTreeNode* node );
+ virtual void free_train_data();
+ virtual void free_node( CvDTreeNode* node );
+
+ int sample_count, var_all, var_count, max_c_count;
+ int ord_var_count, cat_var_count, work_var_count;
+ bool have_labels, have_priors;
+ bool is_classifier;
+ int tflag;
+
+ const CvMat* train_data;
+ const CvMat* responses;
+ CvMat* responses_copy; // used in Boosting
+
+ int buf_count, buf_size; // buf_size is obsolete, please do not use it, use expression ((int64)buf->rows * (int64)buf->cols / buf_count) instead
+ bool shared;
+ int is_buf_16u;
+
+ CvMat* cat_count;
+ CvMat* cat_ofs;
+ CvMat* cat_map;
+
+ CvMat* counts;
+ CvMat* buf;
+ inline size_t get_length_subbuf() const
+ {
+ size_t res = (size_t)(work_var_count + 1) * (size_t)sample_count;
+ return res;
+ }
+
+ CvMat* direction;
+ CvMat* split_buf;
+
+ CvMat* var_idx;
+ CvMat* var_type; // i-th element =
+ // k<0 - ordered
+ // k>=0 - categorical, see k-th element of cat_* arrays
+ CvMat* priors;
+ CvMat* priors_mult;
+
+ CvDTreeParams params;
+
+ CvMemStorage* tree_storage;
+ CvMemStorage* temp_storage;
+
+ CvDTreeNode* data_root;
+
+ CvSet* node_heap;
+ CvSet* split_heap;
+ CvSet* cv_heap;
+ CvSet* nv_heap;
+
+ cv::RNG* rng;
+};
+
+class CvDTree;
+class CvForestTree;
+
+namespace cv
+{
+ struct DTreeBestSplitFinder;
+ struct ForestTreeBestSplitFinder;
+}
+
+// Decision tree (CART-style) for classification and regression.
+// Provides both the legacy CvMat-based API and cv::Mat overloads.
+class CV_EXPORTS_W CvDTree : public CvStatModel
+{
+public:
+    CV_WRAP CvDTree();
+    virtual ~CvDTree();
+
+    // Train from raw matrices; tflag selects row- vs column-wise samples.
+    virtual bool train( const CvMat* trainData, int tflag,
+                        const CvMat* responses, const CvMat* varIdx=0,
+                        const CvMat* sampleIdx=0, const CvMat* varType=0,
+                        const CvMat* missingDataMask=0,
+                        CvDTreeParams params=CvDTreeParams() );
+
+    virtual bool train( CvMLData* trainData, CvDTreeParams params=CvDTreeParams() );
+
+    // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
+    virtual float calc_error( CvMLData* trainData, int type, std::vector<float> *resp = 0 );
+
+    // Train on already-prepared train data (may be shared by an ensemble).
+    virtual bool train( CvDTreeTrainData* trainData, const CvMat* subsampleIdx );
+
+    virtual CvDTreeNode* predict( const CvMat* sample, const CvMat* missingDataMask=0,
+                                  bool preprocessedInput=false ) const;
+
+    // cv::Mat counterparts of the legacy interface above.
+    CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag,
+                       const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
+                       const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
+                       const cv::Mat& missingDataMask=cv::Mat(),
+                       CvDTreeParams params=CvDTreeParams() );
+
+    CV_WRAP virtual CvDTreeNode* predict( const cv::Mat& sample, const cv::Mat& missingDataMask=cv::Mat(),
+                                  bool preprocessedInput=false ) const;
+    CV_WRAP virtual cv::Mat getVarImportance();
+
+    virtual const CvMat* get_var_importance();
+    CV_WRAP virtual void clear();
+
+    virtual void read( CvFileStorage* fs, CvFileNode* node );
+    virtual void write( CvFileStorage* fs, const char* name ) const;
+
+    // special read & write methods for trees in the tree ensembles
+    virtual void read( CvFileStorage* fs, CvFileNode* node,
+                       CvDTreeTrainData* data );
+    virtual void write( CvFileStorage* fs ) const;
+
+    const CvDTreeNode* get_root() const;
+    int get_pruned_tree_idx() const;
+    CvDTreeTrainData* get_data();
+
+protected:
+    friend struct cv::DTreeBestSplitFinder;
+
+    virtual bool do_train( const CvMat* _subsample_idx );
+
+    // Recursive tree-construction helpers: split search per variable type
+    // (ordered/categorical) and problem type (classification/regression).
+    virtual void try_split_node( CvDTreeNode* n );
+    virtual void split_node_data( CvDTreeNode* n );
+    virtual CvDTreeSplit* find_best_split( CvDTreeNode* n );
+    virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
+    virtual double calc_node_dir( CvDTreeNode* node );
+    virtual void complete_node_dir( CvDTreeNode* node );
+    virtual void cluster_categories( const int* vectors, int vector_count,
+        int var_count, int* sums, int k, int* cluster_labels );
+
+    virtual void calc_node_value( CvDTreeNode* node );
+
+    // Cross-validation based pruning of the grown tree.
+    virtual void prune_cv();
+    virtual double update_tree_rnc( int T, int fold );
+    virtual int cut_tree( int T, int fold, double min_alpha );
+    virtual void free_prune_data(bool cut_tree);
+    virtual void free_tree();
+
+    // (De)serialization of individual nodes/splits and the whole tree.
+    virtual void write_node( CvFileStorage* fs, CvDTreeNode* node ) const;
+    virtual void write_split( CvFileStorage* fs, CvDTreeSplit* split ) const;
+    virtual CvDTreeNode* read_node( CvFileStorage* fs, CvFileNode* node, CvDTreeNode* parent );
+    virtual CvDTreeSplit* read_split( CvFileStorage* fs, CvFileNode* node );
+    virtual void write_tree_nodes( CvFileStorage* fs ) const;
+    virtual void read_tree_nodes( CvFileStorage* fs, CvFileNode* node );
+
+    CvDTreeNode* root;           // root of the learned tree
+    CvMat* var_importance;       // variable importance (see get_var_importance)
+    CvDTreeTrainData* data;      // training data, possibly shared with an ensemble
+
+public:
+    int pruned_tree_idx;
+};
+
+
+/****************************************************************************************\
+* Random Trees Classifier *
+\****************************************************************************************/
+
+class CvRTrees;
+
+// A single decision tree used as a member of a CvRTrees forest.
+class CV_EXPORTS CvForestTree: public CvDTree
+{
+public:
+    CvForestTree();
+    virtual ~CvForestTree();
+
+    // Train on shared forest data; 'forest' is the owning ensemble.
+    virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx, CvRTrees* forest );
+
+    virtual int get_var_count() const {return data ? data->var_count : 0;}
+    virtual void read( CvFileStorage* fs, CvFileNode* node, CvRTrees* forest, CvDTreeTrainData* _data );
+
+    /* dummy methods to avoid warnings: BEGIN */
+    virtual bool train( const CvMat* trainData, int tflag,
+                        const CvMat* responses, const CvMat* varIdx=0,
+                        const CvMat* sampleIdx=0, const CvMat* varType=0,
+                        const CvMat* missingDataMask=0,
+                        CvDTreeParams params=CvDTreeParams() );
+
+    virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx );
+    virtual void read( CvFileStorage* fs, CvFileNode* node );
+    virtual void read( CvFileStorage* fs, CvFileNode* node,
+                       CvDTreeTrainData* data );
+    /* dummy methods to avoid warnings: END */
+
+protected:
+    friend struct cv::ForestTreeBestSplitFinder;
+
+    virtual CvDTreeSplit* find_best_split( CvDTreeNode* n );
+    CvRTrees* forest;   // back-pointer to the owning ensemble
+};
+
+
+// Training parameters for random trees; extends per-tree CvDTreeParams
+// with forest-level settings.
+struct CV_EXPORTS_W_MAP CvRTParams : public CvDTreeParams
+{
+    //Parameters for the forest
+    CV_PROP_RW bool calc_var_importance; // true <=> RF processes variable importance
+    CV_PROP_RW int nactive_vars;         // size of the active-variable subset (see ml docs)
+    CV_PROP_RW CvTermCriteria term_crit; // criteria for terminating forest growth
+
+    CvRTParams();
+    CvRTParams( int max_depth, int min_sample_count,
+                float regression_accuracy, bool use_surrogates,
+                int max_categories, const float* priors, bool calc_var_importance,
+                int nactive_vars, int max_num_of_trees_in_the_forest,
+                float forest_accuracy, int termcrit_type );
+};
+
+
+// Random Trees (random forest): an ensemble of CvForestTree instances
+// with out-of-bag (oob) error estimation.
+class CV_EXPORTS_W CvRTrees : public CvStatModel
+{
+public:
+    CV_WRAP CvRTrees();
+    virtual ~CvRTrees();
+    virtual bool train( const CvMat* trainData, int tflag,
+                        const CvMat* responses, const CvMat* varIdx=0,
+                        const CvMat* sampleIdx=0, const CvMat* varType=0,
+                        const CvMat* missingDataMask=0,
+                        CvRTParams params=CvRTParams() );
+
+    virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() );
+    virtual float predict( const CvMat* sample, const CvMat* missing = 0 ) const;
+    virtual float predict_prob( const CvMat* sample, const CvMat* missing = 0 ) const;
+
+    // cv::Mat counterparts of the legacy interface above.
+    CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag,
+                       const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
+                       const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
+                       const cv::Mat& missingDataMask=cv::Mat(),
+                       CvRTParams params=CvRTParams() );
+    CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const;
+    CV_WRAP virtual float predict_prob( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const;
+    CV_WRAP virtual cv::Mat getVarImportance();
+
+    CV_WRAP virtual void clear();
+
+    virtual const CvMat* get_var_importance();
+    virtual float get_proximity( const CvMat* sample1, const CvMat* sample2,
+        const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const;
+
+    virtual float calc_error( CvMLData* data, int type , std::vector<float>* resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
+
+    virtual float get_train_error();
+
+    virtual void read( CvFileStorage* fs, CvFileNode* node );
+    virtual void write( CvFileStorage* fs, const char* name ) const;
+
+    CvMat* get_active_var_mask();
+    CvRNG* get_rng();
+
+    int get_tree_count() const;
+    CvForestTree* get_tree(int i) const;
+
+protected:
+    virtual std::string getName() const;
+
+    virtual bool grow_forest( const CvTermCriteria term_crit );
+
+    // array of the trees of the forest
+    CvForestTree** trees;
+    CvDTreeTrainData* data;   // training data shared by all trees
+    int ntrees;               // number of trees in 'trees'
+    int nclasses;
+    double oob_error;         // out-of-bag error estimate
+    CvMat* var_importance;
+    int nsamples;
+
+    cv::RNG* rng;
+    CvMat* active_var_mask;
+};
+
+/****************************************************************************************\
+* Extremely randomized trees Classifier *
+\****************************************************************************************/
+// Train-data variant for Extremely Randomized Trees: overrides the data
+// accessors and subsampling, and additionally keeps the missing-value mask.
+struct CV_EXPORTS CvERTreeTrainData : public CvDTreeTrainData
+{
+    virtual void set_data( const CvMat* trainData, int tflag,
+                          const CvMat* responses, const CvMat* varIdx=0,
+                          const CvMat* sampleIdx=0, const CvMat* varType=0,
+                          const CvMat* missingDataMask=0,
+                          const CvDTreeParams& params=CvDTreeParams(),
+                          bool _shared=false, bool _add_labels=false,
+                          bool _update_data=false );
+    virtual void get_ord_var_data( CvDTreeNode* n, int vi, float* ord_values_buf, int* missing_buf,
+                                   const float** ord_values, const int** missing, int* sample_buf = 0 );
+    virtual const int* get_sample_indices( CvDTreeNode* n, int* indices_buf );
+    virtual const int* get_cv_labels( CvDTreeNode* n, int* labels_buf );
+    virtual const int* get_cat_var_data( CvDTreeNode* n, int vi, int* cat_values_buf );
+    virtual void get_vectors( const CvMat* _subsample_idx, float* values, uchar* missing,
+                              float* responses, bool get_class_idx=false );
+    virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx );
+    const CvMat* missing_mask;   // mask of missing values in the training data
+};
+
+// Tree used by CvERTrees: overrides the split-search routines of
+// CvForestTree (ordered/categorical x classification/regression).
+class CV_EXPORTS CvForestERTree : public CvForestTree
+{
+protected:
+    virtual double calc_node_dir( CvDTreeNode* node );
+    virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual void split_node_data( CvDTreeNode* n );
+};
+
+// Extremely Randomized Trees ensemble: same public interface as CvRTrees,
+// with its own forest-growing implementation.
+class CV_EXPORTS_W CvERTrees : public CvRTrees
+{
+public:
+    CV_WRAP CvERTrees();
+    virtual ~CvERTrees();
+    virtual bool train( const CvMat* trainData, int tflag,
+                        const CvMat* responses, const CvMat* varIdx=0,
+                        const CvMat* sampleIdx=0, const CvMat* varType=0,
+                        const CvMat* missingDataMask=0,
+                        CvRTParams params=CvRTParams());
+    CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag,
+                       const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
+                       const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
+                       const cv::Mat& missingDataMask=cv::Mat(),
+                       CvRTParams params=CvRTParams());
+    virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() );
+protected:
+    virtual std::string getName() const;
+    virtual bool grow_forest( const CvTermCriteria term_crit );
+};
+
+
+/****************************************************************************************\
+* Boosted tree classifier *
+\****************************************************************************************/
+
+// Boosting parameters; extends the per-tree CvDTreeParams.
+struct CV_EXPORTS_W_MAP CvBoostParams : public CvDTreeParams
+{
+    CV_PROP_RW int boost_type;          // one of CvBoost::{DISCRETE,REAL,LOGIT,GENTLE}
+    CV_PROP_RW int weak_count;          // number of weak classifiers to train
+    CV_PROP_RW int split_criteria;      // one of CvBoost::{DEFAULT,GINI,MISCLASS,SQERR}
+    CV_PROP_RW double weight_trim_rate;
+
+    CvBoostParams();
+    CvBoostParams( int boost_type, int weak_count, double weight_trim_rate,
+                   int max_depth, bool use_surrogates, const float* priors );
+};
+
+
+class CvBoost;
+
+// Weak learner of a CvBoost ensemble: a decision tree trained on
+// the ensemble's shared data.
+class CV_EXPORTS CvBoostTree: public CvDTree
+{
+public:
+    CvBoostTree();
+    virtual ~CvBoostTree();
+
+    // Train on shared ensemble data; 'ensemble' is the owning booster.
+    virtual bool train( CvDTreeTrainData* trainData,
+                        const CvMat* subsample_idx, CvBoost* ensemble );
+
+    virtual void scale( double s );
+    virtual void read( CvFileStorage* fs, CvFileNode* node,
+                       CvBoost* ensemble, CvDTreeTrainData* _data );
+    virtual void clear();
+
+    /* dummy methods to avoid warnings: BEGIN */
+    virtual bool train( const CvMat* trainData, int tflag,
+                        const CvMat* responses, const CvMat* varIdx=0,
+                        const CvMat* sampleIdx=0, const CvMat* varType=0,
+                        const CvMat* missingDataMask=0,
+                        CvDTreeParams params=CvDTreeParams() );
+    virtual bool train( CvDTreeTrainData* trainData, const CvMat* _subsample_idx );
+
+    virtual void read( CvFileStorage* fs, CvFileNode* node );
+    virtual void read( CvFileStorage* fs, CvFileNode* node,
+                       CvDTreeTrainData* data );
+    /* dummy methods to avoid warnings: END */
+
+protected:
+
+    // Overridden split search / node evaluation for boosted training.
+    virtual void try_split_node( CvDTreeNode* n );
+    virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
+        float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
+    virtual void calc_node_value( CvDTreeNode* n );
+    virtual double calc_node_dir( CvDTreeNode* n );
+
+    CvBoost* ensemble;   // back-pointer to the owning booster
+};
+
+
+// Boosted tree classifier; see the boosting-type and split-criteria enums below.
+class CV_EXPORTS_W CvBoost : public CvStatModel
+{
+public:
+    // Boosting type
+    enum { DISCRETE=0, REAL=1, LOGIT=2, GENTLE=3 };
+
+    // Splitting criteria
+    enum { DEFAULT=0, GINI=1, MISCLASS=3, SQERR=4 };
+
+    CV_WRAP CvBoost();
+    virtual ~CvBoost();
+
+    // Constructor that also trains the model (legacy CvMat interface).
+    CvBoost( const CvMat* trainData, int tflag,
+             const CvMat* responses, const CvMat* varIdx=0,
+             const CvMat* sampleIdx=0, const CvMat* varType=0,
+             const CvMat* missingDataMask=0,
+             CvBoostParams params=CvBoostParams() );
+
+    // NOTE(review): 'update' presumably continues training an existing
+    // ensemble instead of starting over — confirm against the ml docs.
+    virtual bool train( const CvMat* trainData, int tflag,
+             const CvMat* responses, const CvMat* varIdx=0,
+             const CvMat* sampleIdx=0, const CvMat* varType=0,
+             const CvMat* missingDataMask=0,
+             CvBoostParams params=CvBoostParams(),
+             bool update=false );
+
+    virtual bool train( CvMLData* data,
+             CvBoostParams params=CvBoostParams(),
+             bool update=false );
+
+    virtual float predict( const CvMat* sample, const CvMat* missing=0,
+                           CvMat* weak_responses=0, CvSlice slice=CV_WHOLE_SEQ,
+                           bool raw_mode=false, bool return_sum=false ) const;
+
+    // cv::Mat counterparts of the legacy interface above.
+    CV_WRAP CvBoost( const cv::Mat& trainData, int tflag,
+            const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
+            const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
+            const cv::Mat& missingDataMask=cv::Mat(),
+            CvBoostParams params=CvBoostParams() );
+
+    CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag,
+                       const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
+                       const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
+                       const cv::Mat& missingDataMask=cv::Mat(),
+                       CvBoostParams params=CvBoostParams(),
+                       bool update=false );
+
+    CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(),
+                                   const cv::Range& slice=cv::Range::all(), bool rawMode=false,
+                                   bool returnSum=false ) const;
+
+    virtual float calc_error( CvMLData* _data, int type , std::vector<float> *resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
+
+    // NOTE(review): looks like this drops the weak predictors selected by
+    // 'slice' from the ensemble — verify in the ml docs.
+    CV_WRAP virtual void prune( CvSlice slice );
+
+    CV_WRAP virtual void clear();
+
+    virtual void write( CvFileStorage* storage, const char* name ) const;
+    virtual void read( CvFileStorage* storage, CvFileNode* node );
+    virtual const CvMat* get_active_vars(bool absolute_idx=true);
+
+    CvSeq* get_weak_predictors();
+
+    CvMat* get_weights();
+    CvMat* get_subtree_weights();
+    CvMat* get_weak_response();
+    const CvBoostParams& get_params() const;
+    const CvDTreeTrainData* get_data() const;
+
+protected:
+
+    void update_weights_impl( CvBoostTree* tree, double initial_weights[2] );
+
+    virtual bool set_params( const CvBoostParams& params );
+    virtual void update_weights( CvBoostTree* tree );
+    virtual void trim_weights();
+    virtual void write_params( CvFileStorage* fs ) const;
+    virtual void read_params( CvFileStorage* fs, CvFileNode* node );
+
+    CvDTreeTrainData* data;
+    CvBoostParams params;
+    CvSeq* weak;              // sequence of trained weak predictors
+
+    CvMat* active_vars;
+    CvMat* active_vars_abs;
+    bool have_active_cat_vars;
+
+    CvMat* orig_response;
+    CvMat* sum_response;
+    CvMat* weak_eval;
+    CvMat* subsample_mask;
+    CvMat* weights;           // per-sample boosting weights (see update_weights)
+    CvMat* subtree_weights;
+    bool have_subsample;
+};
+
+
+/****************************************************************************************\
+* Gradient Boosted Trees *
+\****************************************************************************************/
+
+// DataType: STRUCT CvGBTreesParams
+// Parameters of GBT (Gradient Boosted trees model), including single
+// tree settings and ensemble parameters.
+//
+// weak_count - count of trees in the ensemble
+// loss_function_type - loss function used for ensemble training
+// subsample_portion - portion of whole training set used for
+// every single tree training.
+// subsample_portion value is in (0.0, 1.0].
+// subsample_portion == 1.0 when whole dataset is
+// used on each step. Count of sample used on each
+// step is computed as
+// int(total_samples_count * subsample_portion).
+// shrinkage - regularization parameter.
+// Each tree prediction is multiplied on shrinkage value.
+
+
+// Parameters of the Gradient Boosted Trees model; the comment block above
+// (STRUCT CvGBTreesParams) describes each field in detail.
+struct CV_EXPORTS_W_MAP CvGBTreesParams : public CvDTreeParams
+{
+    CV_PROP_RW int weak_count;          // count of trees in the ensemble
+    CV_PROP_RW int loss_function_type;  // loss function used for ensemble training
+    CV_PROP_RW float subsample_portion; // portion of the training set per tree, in (0.0, 1.0]
+    CV_PROP_RW float shrinkage;         // regularization: each tree prediction is multiplied by it
+
+    CvGBTreesParams();
+    CvGBTreesParams( int loss_function_type, int weak_count, float shrinkage,
+        float subsample_portion, int max_depth, bool use_surrogates );
+};
+
+// DataType: CLASS CvGBTrees
+// Gradient Boosting Trees (GBT) algorithm implementation.
+//
+// data - training dataset
+// params - parameters of the CvGBTrees
+// weak - array[0..(class_count-1)] of CvSeq
+// for storing tree ensembles
+// orig_response - original responses of the training set samples
+//      sum_response        - predictions of the current model on the training dataset.
+//                            this matrix is updated on every iteration.
+//      sum_response_tmp    - predictions of the model on the training set on the next
+//                            step. On every iteration values of sum_response_tmp are
+//                            computed via sum_response values. When the current
+//                            step is complete sum_response values become equal to
+//                            sum_response_tmp.
+// sampleIdx - indices of samples used for training the ensemble.
+// CvGBTrees training procedure takes a set of samples
+// (train_data) and a set of responses (responses).
+// Only pairs (train_data[i], responses[i]), where i is
+// in sample_idx are used for training the ensemble.
+//      subsample_train     - indices of samples used for training a single decision
+//                            tree on the current step. These indices are counted
+//                            relative to the sample_idx, so that pairs
+//                            (train_data[sample_idx[i]], responses[sample_idx[i]])
+//                            are used for training a decision tree.
+//                            The training set is randomly split
+//                            in two parts (subsample_train and subsample_test)
+//                            on every iteration according to the portion parameter.
+// subsample_test - relative indices of samples from the training set,
+// which are not used for training a tree on the current
+// step.
+// missing - mask of the missing values in the training set. This
+// matrix has the same size as train_data. 1 - missing
+// value, 0 - not a missing value.
+// class_labels - output class labels map.
+//      rng                 - random number generator. Used for splitting the
+//                            training set.
+// class_count - count of output classes.
+// class_count == 1 in the case of regression,
+// and > 1 in the case of classification.
+// delta - Huber loss function parameter.
+// base_value - start point of the gradient descent procedure.
+// model prediction is
+// f(x) = f_0 + sum_{i=1..weak_count-1}(f_i(x)), where
+// f_0 is the base value.
+
+
+
+class CV_EXPORTS_W CvGBTrees : public CvStatModel
+{
+public:
+
+ /*
+ // DataType: ENUM
+ // Loss functions implemented in CvGBTrees.
+ //
+ // SQUARED_LOSS
+ // problem: regression
+ // loss = (x - x')^2
+ //
+ // ABSOLUTE_LOSS
+ // problem: regression
+ // loss = abs(x - x')
+ //
+ // HUBER_LOSS
+ // problem: regression
+ // loss = delta*( abs(x - x') - delta/2), if abs(x - x') > delta
+ // 1/2*(x - x')^2, if abs(x - x') <= delta,
+ // where delta is the alpha-quantile of pseudo responses from
+ // the training set.
+ //
+ // DEVIANCE_LOSS
+ // problem: classification
+ //
+ */
+ enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS};
+
+
+ /*
+ // Default constructor. Creates a model only (without training).
+ // Should be followed by one form of the train(...) function.
+ //
+ // API
+ // CvGBTrees();
+
+ // INPUT
+ // OUTPUT
+ // RESULT
+ */
+ CV_WRAP CvGBTrees();
+
+
+ /*
+ // Full form constructor. Creates a gradient boosting model and does the
+ // train.
+ //
+ // API
+ // CvGBTrees( const CvMat* trainData, int tflag,
+ const CvMat* responses, const CvMat* varIdx=0,
+ const CvMat* sampleIdx=0, const CvMat* varType=0,
+ const CvMat* missingDataMask=0,
+ CvGBTreesParams params=CvGBTreesParams() );
+
+ // INPUT
+ // trainData - a set of input feature vectors.
+ // size of matrix is
+ // <count of samples> x <variables count>
+ // or <variables count> x <count of samples>
+ // depending on the tflag parameter.
+ // matrix values are float.
+ // tflag - a flag showing how do samples stored in the
+ // trainData matrix row by row (tflag=CV_ROW_SAMPLE)
+ // or column by column (tflag=CV_COL_SAMPLE).
+ // responses - a vector of responses corresponding to the samples
+ // in trainData.
+ // varIdx - indices of used variables. zero value means that all
+ // variables are active.
+ // sampleIdx - indices of used samples. zero value means that all
+ // samples from trainData are in the training set.
+ // varType - vector of <variables count> length. gives every
+ // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED.
+ // varType = 0 means all variables are numerical.
+ // missingDataMask - a mask of misiing values in trainData.
+ // missingDataMask = 0 means that there are no missing
+ // values.
+ // params - parameters of GTB algorithm.
+ // OUTPUT
+ // RESULT
+ */
+ CvGBTrees( const CvMat* trainData, int tflag,
+ const CvMat* responses, const CvMat* varIdx=0,
+ const CvMat* sampleIdx=0, const CvMat* varType=0,
+ const CvMat* missingDataMask=0,
+ CvGBTreesParams params=CvGBTreesParams() );
+
+
+ /*
+ // Destructor.
+ */
+ virtual ~CvGBTrees();
+
+
+ /*
+ // Gradient tree boosting model training
+ //
+ // API
+ // virtual bool train( const CvMat* trainData, int tflag,
+ const CvMat* responses, const CvMat* varIdx=0,
+ const CvMat* sampleIdx=0, const CvMat* varType=0,
+ const CvMat* missingDataMask=0,
+ CvGBTreesParams params=CvGBTreesParams(),
+ bool update=false );
+
+ // INPUT
+ // trainData - a set of input feature vectors.
+ // size of matrix is
+ // <count of samples> x <variables count>
+ // or <variables count> x <count of samples>
+ // depending on the tflag parameter.
+ // matrix values are float.
+ // tflag - a flag showing how do samples stored in the
+ // trainData matrix row by row (tflag=CV_ROW_SAMPLE)
+ // or column by column (tflag=CV_COL_SAMPLE).
+ // responses - a vector of responses corresponding to the samples
+ // in trainData.
+ // varIdx - indices of used variables. zero value means that all
+ // variables are active.
+ // sampleIdx - indices of used samples. zero value means that all
+ // samples from trainData are in the training set.
+ // varType - vector of <variables count> length. gives every
+ // variable type CV_VAR_CATEGORICAL or CV_VAR_ORDERED.
+ // varType = 0 means all variables are numerical.
+ // missingDataMask - a mask of misiing values in trainData.
+ // missingDataMask = 0 means that there are no missing
+ // values.
+ // params - parameters of GTB algorithm.
+ // update - is not supported now. (!)
+ // OUTPUT
+ // RESULT
+ // Error state.
+ */
+ virtual bool train( const CvMat* trainData, int tflag,
+ const CvMat* responses, const CvMat* varIdx=0,
+ const CvMat* sampleIdx=0, const CvMat* varType=0,
+ const CvMat* missingDataMask=0,
+ CvGBTreesParams params=CvGBTreesParams(),
+ bool update=false );
+
+
+ /*
+ // Gradient tree boosting model training
+ //
+ // API
+ // virtual bool train( CvMLData* data,
+ CvGBTreesParams params=CvGBTreesParams(),
+ bool update=false ) {return false;};
+
+ // INPUT
+ // data - training set.
+ // params - parameters of GTB algorithm.
+ // update - is not supported now. (!)
+ // OUTPUT
+ // RESULT
+ // Error state.
+ */
+ virtual bool train( CvMLData* data,
+ CvGBTreesParams params=CvGBTreesParams(),
+ bool update=false );
+
+
+ /*
+ // Response value prediction
+ //
+ // API
+ // virtual float predict_serial( const CvMat* sample, const CvMat* missing=0,
+ CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ,
+ int k=-1 ) const;
+
+ // INPUT
+ // sample - input sample of the same type as in the training set.
+ // missing - missing values mask. missing=0 if there are no
+ // missing values in sample vector.
+ // weak_responses - predictions of all of the trees.
+ // not implemented (!)
+ // slice - part of the ensemble used for prediction.
+ // slice = CV_WHOLE_SEQ when all trees are used.
+ // k - number of ensemble used.
+ // k is in {-1,0,1,..,<count of output classes-1>}.
+ // in the case of classification problem
+ // <count of output classes-1> ensembles are built.
+ // If k = -1 ordinary prediction is the result,
+ // otherwise function gives the prediction of the
+ // k-th ensemble only.
+ // OUTPUT
+ // RESULT
+ // Predicted value.
+ */
+ virtual float predict_serial( const CvMat* sample, const CvMat* missing=0,
+ CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ,
+ int k=-1 ) const;
+
+ /*
+ // Response value prediction.
+ // Parallel version (in the case of TBB existence)
+ //
+ // API
+ // virtual float predict( const CvMat* sample, const CvMat* missing=0,
+ CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ,
+ int k=-1 ) const;
+
+ // INPUT
+ // sample - input sample of the same type as in the training set.
+ // missing - missing values mask. missing=0 if there are no
+ // missing values in sample vector.
+ // weak_responses - predictions of all of the trees.
+ // not implemented (!)
+ // slice - part of the ensemble used for prediction.
+ // slice = CV_WHOLE_SEQ when all trees are used.
+ // k - number of ensemble used.
+ // k is in {-1,0,1,..,<count of output classes-1>}.
+ // in the case of classification problem
+ // <count of output classes-1> ensembles are built.
+ // If k = -1 ordinary prediction is the result,
+ // otherwise function gives the prediction of the
+ // k-th ensemble only.
+ // OUTPUT
+ // RESULT
+ // Predicted value.
+ */
+ virtual float predict( const CvMat* sample, const CvMat* missing=0,
+ CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ,
+ int k=-1 ) const;
+
+ /*
+ // Deletes all the data.
+ //
+ // API
+ // virtual void clear();
+
+ // INPUT
+ // OUTPUT
+ // delete data, weak, orig_response, sum_response,
+ // weak_eval, subsample_train, subsample_test,
+ // sample_idx, missing, lass_labels
+ // delta = 0.0
+ // RESULT
+ */
+ CV_WRAP virtual void clear();
+
+ /*
+ // Compute error on the train/test set.
+ //
+ // API
+ // virtual float calc_error( CvMLData* _data, int type,
+ // std::vector<float> *resp = 0 );
+ //
+ // INPUT
+ // data - dataset
+ // type - defines which error is to compute: train (CV_TRAIN_ERROR) or
+ // test (CV_TEST_ERROR).
+ // OUTPUT
+ // resp - vector of predicitons
+ // RESULT
+ // Error value.
+ */
+ virtual float calc_error( CvMLData* _data, int type,
+ std::vector<float> *resp = 0 );
+
+ /*
+ //
+ // Write parameters of the gtb model and data. Write learned model.
+ //
+ // API
+ // virtual void write( CvFileStorage* fs, const char* name ) const;
+ //
+ // INPUT
+ // fs - file storage to read parameters from.
+ // name - model name.
+ // OUTPUT
+ // RESULT
+ */
+ virtual void write( CvFileStorage* fs, const char* name ) const;
+
+
+ /*
+ //
+ // Read parameters of the gtb model and data. Read learned model.
+ //
+ // API
+ // virtual void read( CvFileStorage* fs, CvFileNode* node );
+ //
+ // INPUT
+ // fs - file storage to read parameters from.
+ // node - file node.
+ // OUTPUT
+ // RESULT
+ */
+ virtual void read( CvFileStorage* fs, CvFileNode* node );
+
+
+ // new-style C++ interface
+ CV_WRAP CvGBTrees( const cv::Mat& trainData, int tflag,
+ const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
+ const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
+ const cv::Mat& missingDataMask=cv::Mat(),
+ CvGBTreesParams params=CvGBTreesParams() );
+
+ CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag,
+ const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
+ const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
+ const cv::Mat& missingDataMask=cv::Mat(),
+ CvGBTreesParams params=CvGBTreesParams(),
+ bool update=false );
+
+ CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(),
+ const cv::Range& slice = cv::Range::all(),
+ int k=-1 ) const;
+
+protected:
+
+ /*
+ // Compute the gradient vector components.
+ //
+ // API
+ // virtual void find_gradient( const int k = 0);
+
+ // INPUT
+ // k - used for classification problem, determining current
+ // tree ensemble.
+ // OUTPUT
+ // changes components of data->responses
+ // which correspond to samples used for training
+ // on the current step.
+ // RESULT
+ */
+ virtual void find_gradient( const int k = 0);
+
+
+ /*
+ //
+ // Change values in tree leaves according to the used loss function.
+ //
+ // API
+ // virtual void change_values(CvDTree* tree, const int k = 0);
+ //
+ // INPUT
+ // tree - decision tree to change.
+ // k - used for classification problem, determining current
+ // tree ensemble.
+ // OUTPUT
+ // changes 'value' fields of the trees' leaves.
+ // changes sum_response_tmp.
+ // RESULT
+ */
+ virtual void change_values(CvDTree* tree, const int k = 0);
+
+
+ /*
+ //
+ // Find optimal constant prediction value according to the used loss
+ // function.
+ // The goal is to find a constant which gives the minimal summary loss
+ // on the _Idx samples.
+ //
+ // API
+ // virtual float find_optimal_value( const CvMat* _Idx );
+ //
+ // INPUT
+ // _Idx - indices of the samples from the training set.
+ // OUTPUT
+ // RESULT
+ // optimal constant value.
+ */
+ virtual float find_optimal_value( const CvMat* _Idx );
+
+
+ /*
+ //
+ // Randomly split the whole training set in two parts according
+ // to params.portion.
+ //
+ // API
+ // virtual void do_subsample();
+ //
+ // INPUT
+ // OUTPUT
+ // subsample_train - indices of samples used for training
+ // subsample_test - indices of samples used for test
+ // RESULT
+ */
+ virtual void do_subsample();
+
+
+ /*
+ //
+ // Internal recursive function giving an array of subtree tree leaves.
+ //
+ // API
+ // void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node );
+ //
+ // INPUT
+ // node - current leaf.
+ // OUTPUT
+ // count - count of leaves in the subtree.
+ // leaves - array of pointers to leaves.
+ // RESULT
+ */
+ void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node );
+
+
+ /*
+ //
+ // Get leaves of the tree.
+ //
+ // API
+ // CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len );
+ //
+ // INPUT
+ // dtree - decision tree.
+ // OUTPUT
+ // len - count of the leaves.
+ // RESULT
+ // CvDTreeNode** - array of pointers to leaves.
+ */
+ CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len );
+
+
+ /*
+ //
+ // Is it a regression or a classification.
+ //
+ // API
+ // bool problem_type();
+ //
+ // INPUT
+ // OUTPUT
+ // RESULT
+ // false if it is a classification problem,
+ // true - if regression.
+ */
+ virtual bool problem_type() const;
+
+
+ /*
+ //
+ // Write parameters of the gtb model.
+ //
+ // API
+ // virtual void write_params( CvFileStorage* fs ) const;
+ //
+ // INPUT
+ // fs - file storage to write parameters to.
+ // OUTPUT
+ // RESULT
+ */
+ virtual void write_params( CvFileStorage* fs ) const;
+
+
+ /*
+ //
+ // Read parameters of the gtb model and data.
+ //
+ // API
+ // virtual void read_params( CvFileStorage* fs );
+ //
+ // INPUT
+ // fs - file storage to read parameters from.
+ // OUTPUT
+ // params - parameters of the gtb model.
+ // data - contains information about the structure
+ // of the data set (count of variables,
+ // their types, etc.).
+ // class_labels - output class labels map.
+ // RESULT
+ */
+ virtual void read_params( CvFileStorage* fs, CvFileNode* fnode );
+ int get_len(const CvMat* mat) const;
+
+
+ CvDTreeTrainData* data;
+ CvGBTreesParams params;
+
+ CvSeq** weak;
+ CvMat* orig_response;
+ CvMat* sum_response;
+ CvMat* sum_response_tmp;
+ CvMat* sample_idx;
+ CvMat* subsample_train;
+ CvMat* subsample_test;
+ CvMat* missing;
+ CvMat* class_labels;
+
+ cv::RNG* rng;
+
+ int class_count;
+ float delta;
+ float base_value;
+
+};
+
+
+
+/****************************************************************************************\
+* Artificial Neural Networks (ANN) *
+\****************************************************************************************/
+
+/////////////////////////////////// Multi-Layer Perceptrons //////////////////////////////
+
+struct CV_EXPORTS_W_MAP CvANN_MLP_TrainParams
+{
+ CvANN_MLP_TrainParams();
+ CvANN_MLP_TrainParams( CvTermCriteria term_crit, int train_method,
+ double param1, double param2=0 );
+ ~CvANN_MLP_TrainParams();
+
+ enum { BACKPROP=0, RPROP=1 };
+
+ CV_PROP_RW CvTermCriteria term_crit;
+ CV_PROP_RW int train_method;
+
+ // backpropagation parameters
+ CV_PROP_RW double bp_dw_scale, bp_moment_scale;
+
+ // rprop parameters
+ CV_PROP_RW double rp_dw0, rp_dw_plus, rp_dw_minus, rp_dw_min, rp_dw_max;
+};
+
+
+class CV_EXPORTS_W CvANN_MLP : public CvStatModel
+{
+public:
+ CV_WRAP CvANN_MLP();
+ CvANN_MLP( const CvMat* layerSizes,
+ int activateFunc=CvANN_MLP::SIGMOID_SYM,
+ double fparam1=0, double fparam2=0 );
+
+ virtual ~CvANN_MLP();
+
+ virtual void create( const CvMat* layerSizes,
+ int activateFunc=CvANN_MLP::SIGMOID_SYM,
+ double fparam1=0, double fparam2=0 );
+
+ virtual int train( const CvMat* inputs, const CvMat* outputs,
+ const CvMat* sampleWeights, const CvMat* sampleIdx=0,
+ CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(),
+ int flags=0 );
+ virtual float predict( const CvMat* inputs, CV_OUT CvMat* outputs ) const;
+
+ CV_WRAP CvANN_MLP( const cv::Mat& layerSizes,
+ int activateFunc=CvANN_MLP::SIGMOID_SYM,
+ double fparam1=0, double fparam2=0 );
+
+ CV_WRAP virtual void create( const cv::Mat& layerSizes,
+ int activateFunc=CvANN_MLP::SIGMOID_SYM,
+ double fparam1=0, double fparam2=0 );
+
+ CV_WRAP virtual int train( const cv::Mat& inputs, const cv::Mat& outputs,
+ const cv::Mat& sampleWeights, const cv::Mat& sampleIdx=cv::Mat(),
+ CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(),
+ int flags=0 );
+
+ CV_WRAP virtual float predict( const cv::Mat& inputs, CV_OUT cv::Mat& outputs ) const;
+
+ CV_WRAP virtual void clear();
+
+ // possible activation functions
+ enum { IDENTITY = 0, SIGMOID_SYM = 1, GAUSSIAN = 2 };
+
+ // available training flags
+ enum { UPDATE_WEIGHTS = 1, NO_INPUT_SCALE = 2, NO_OUTPUT_SCALE = 4 };
+
+ virtual void read( CvFileStorage* fs, CvFileNode* node );
+ virtual void write( CvFileStorage* storage, const char* name ) const;
+
+ int get_layer_count() { return layer_sizes ? layer_sizes->cols : 0; }
+ const CvMat* get_layer_sizes() { return layer_sizes; }
+ double* get_weights(int layer)
+ {
+ return layer_sizes && weights &&
+ (unsigned)layer <= (unsigned)layer_sizes->cols ? weights[layer] : 0;
+ }
+
+ virtual void calc_activ_func_deriv( CvMat* xf, CvMat* deriv, const double* bias ) const;
+
+protected:
+
+ virtual bool prepare_to_train( const CvMat* _inputs, const CvMat* _outputs,
+ const CvMat* _sample_weights, const CvMat* sampleIdx,
+ CvVectors* _ivecs, CvVectors* _ovecs, double** _sw, int _flags );
+
+ // sequential random backpropagation
+ virtual int train_backprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw );
+
+ // RPROP algorithm
+ virtual int train_rprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw );
+
+ virtual void calc_activ_func( CvMat* xf, const double* bias ) const;
+ virtual void set_activ_func( int _activ_func=SIGMOID_SYM,
+ double _f_param1=0, double _f_param2=0 );
+ virtual void init_weights();
+ virtual void scale_input( const CvMat* _src, CvMat* _dst ) const;
+ virtual void scale_output( const CvMat* _src, CvMat* _dst ) const;
+ virtual void calc_input_scale( const CvVectors* vecs, int flags );
+ virtual void calc_output_scale( const CvVectors* vecs, int flags );
+
+ virtual void write_params( CvFileStorage* fs ) const;
+ virtual void read_params( CvFileStorage* fs, CvFileNode* node );
+
+ CvMat* layer_sizes;
+ CvMat* wbuf;
+ CvMat* sample_weights;
+ double** weights;
+ double f_param1, f_param2;
+ double min_val, max_val, min_val1, max_val1;
+ int activ_func;
+ int max_count, max_buf_sz;
+ CvANN_MLP_TrainParams params;
+ cv::RNG* rng;
+};
+
+/****************************************************************************************\
+* Auxilary functions declarations *
+\****************************************************************************************/
+
+/* Generates <sample> from multivariate normal distribution, where <mean> - is an
+ average row vector, <cov> - symmetric covariation matrix */
+CVAPI(void) cvRandMVNormal( CvMat* mean, CvMat* cov, CvMat* sample,
+ CvRNG* rng CV_DEFAULT(0) );
+
+/* Generates sample from gaussian mixture distribution */
+CVAPI(void) cvRandGaussMixture( CvMat* means[],
+ CvMat* covs[],
+ float weights[],
+ int clsnum,
+ CvMat* sample,
+ CvMat* sampClasses CV_DEFAULT(0) );
+
+#define CV_TS_CONCENTRIC_SPHERES 0
+
+/* creates test set */
+CVAPI(void) cvCreateTestSet( int type, CvMat** samples,
+ int num_samples,
+ int num_features,
+ CvMat** responses,
+ int num_classes, ... );
+
+/****************************************************************************************\
+* Data *
+\****************************************************************************************/
+
+#define CV_COUNT 0
+#define CV_PORTION 1
+
+struct CV_EXPORTS CvTrainTestSplit
+{
+ CvTrainTestSplit();
+ CvTrainTestSplit( int train_sample_count, bool mix = true);
+ CvTrainTestSplit( float train_sample_portion, bool mix = true);
+
+ union
+ {
+ int count;
+ float portion;
+ } train_sample_part;
+ int train_sample_part_mode;
+
+ bool mix;
+};
+
+class CV_EXPORTS CvMLData
+{
+public:
+ CvMLData();
+ virtual ~CvMLData();
+
+ // returns:
+ // 0 - OK
+ // -1 - file can not be opened or is not correct
+ int read_csv( const char* filename );
+
+ const CvMat* get_values() const;
+ const CvMat* get_responses();
+ const CvMat* get_missing() const;
+
+ void set_response_idx( int idx ); // old response become predictors, new response_idx = idx
+ // if idx < 0 there will be no response
+ int get_response_idx() const;
+
+ void set_train_test_split( const CvTrainTestSplit * spl );
+ const CvMat* get_train_sample_idx() const;
+ const CvMat* get_test_sample_idx() const;
+ void mix_train_and_test_idx();
+
+ const CvMat* get_var_idx();
+ void chahge_var_idx( int vi, bool state ); // misspelled (saved for back compitability),
+ // use change_var_idx
+ void change_var_idx( int vi, bool state ); // state == true to set vi-variable as predictor
+
+ const CvMat* get_var_types();
+ int get_var_type( int var_idx ) const;
+ // following 2 methods enable to change vars type
+ // use these methods to assign CV_VAR_CATEGORICAL type for categorical variable
+ // with numerical labels; in the other cases var types are correctly determined automatically
+ void set_var_types( const char* str ); // str examples:
+ // "ord[0-17],cat[18]", "ord[0,2,4,10-12], cat[1,3,5-9,13,14]",
+ // "cat", "ord" (all vars are categorical/ordered)
+ void change_var_type( int var_idx, int type); // type in { CV_VAR_ORDERED, CV_VAR_CATEGORICAL }
+
+ void set_delimiter( char ch );
+ char get_delimiter() const;
+
+ void set_miss_ch( char ch );
+ char get_miss_ch() const;
+
+ const std::map<std::string, int>& get_class_labels_map() const;
+
+protected:
+ virtual void clear();
+
+ void str_to_flt_elem( const char* token, float& flt_elem, int& type);
+ void free_train_test_idx();
+
+ char delimiter;
+ char miss_ch;
+ //char flt_separator;
+
+ CvMat* values;
+ CvMat* missing;
+ CvMat* var_types;
+ CvMat* var_idx_mask;
+
+ CvMat* response_out; // header
+ CvMat* var_idx_out; // mat
+ CvMat* var_types_out; // mat
+
+ int response_idx;
+
+ int train_sample_count;
+ bool mix;
+
+ int total_class_count;
+ std::map<std::string, int> class_map;
+
+ CvMat* train_sample_idx;
+ CvMat* test_sample_idx;
+ int* sample_idx; // data of train_sample_idx and test_sample_idx
+
+ cv::RNG* rng;
+};
+
+
+namespace cv
+{
+
+typedef CvStatModel StatModel;
+typedef CvParamGrid ParamGrid;
+typedef CvNormalBayesClassifier NormalBayesClassifier;
+typedef CvKNearest KNearest;
+typedef CvSVMParams SVMParams;
+typedef CvSVMKernel SVMKernel;
+typedef CvSVMSolver SVMSolver;
+typedef CvSVM SVM;
+typedef CvDTreeParams DTreeParams;
+typedef CvMLData TrainData;
+typedef CvDTree DecisionTree;
+typedef CvForestTree ForestTree;
+typedef CvRTParams RandomTreeParams;
+typedef CvRTrees RandomTrees;
+typedef CvERTreeTrainData ERTreeTRainData;
+typedef CvForestERTree ERTree;
+typedef CvERTrees ERTrees;
+typedef CvBoostParams BoostParams;
+typedef CvBoostTree BoostTree;
+typedef CvBoost Boost;
+typedef CvANN_MLP_TrainParams ANN_MLP_TrainParams;
+typedef CvANN_MLP NeuralNet_MLP;
+typedef CvGBTreesParams GradientBoostingTreeParams;
+typedef CvGBTrees GradientBoostingTrees;
+
+template<> CV_EXPORTS void Ptr<CvDTreeSplit>::delete_obj();
+
+CV_EXPORTS bool initModule_ml(void);
+
+}
+
+#endif // __cplusplus
+#endif // __OPENCV_ML_HPP__
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp
new file mode 100644
index 00000000..f23bec8b
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/features2d.hpp
@@ -0,0 +1,155 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__
+#define __OPENCV_NONFREE_FEATURES_2D_HPP__
+
+#include "opencv2/features2d/features2d.hpp"
+
+#ifdef __cplusplus
+
+namespace cv
+{
+
+/*!
+ SIFT implementation.
+
+ The class implements SIFT algorithm by D. Lowe.
+*/
+class CV_EXPORTS_W SIFT : public Feature2D
+{
+public:
+ CV_WRAP explicit SIFT( int nfeatures=0, int nOctaveLayers=3,
+ double contrastThreshold=0.04, double edgeThreshold=10,
+ double sigma=1.6);
+
+ //! returns the descriptor size in floats (128)
+ CV_WRAP int descriptorSize() const;
+
+ //! returns the descriptor type
+ CV_WRAP int descriptorType() const;
+
+ //! finds the keypoints using SIFT algorithm
+ void operator()(InputArray img, InputArray mask,
+ vector<KeyPoint>& keypoints) const;
+ //! finds the keypoints and computes descriptors for them using SIFT algorithm.
+ //! Optionally it can compute descriptors for the user-provided keypoints
+ void operator()(InputArray img, InputArray mask,
+ vector<KeyPoint>& keypoints,
+ OutputArray descriptors,
+ bool useProvidedKeypoints=false) const;
+
+ AlgorithmInfo* info() const;
+
+ void buildGaussianPyramid( const Mat& base, vector<Mat>& pyr, int nOctaves ) const;
+ void buildDoGPyramid( const vector<Mat>& pyr, vector<Mat>& dogpyr ) const;
+ void findScaleSpaceExtrema( const vector<Mat>& gauss_pyr, const vector<Mat>& dog_pyr,
+ vector<KeyPoint>& keypoints ) const;
+
+protected:
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+ void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+
+ CV_PROP_RW int nfeatures;
+ CV_PROP_RW int nOctaveLayers;
+ CV_PROP_RW double contrastThreshold;
+ CV_PROP_RW double edgeThreshold;
+ CV_PROP_RW double sigma;
+};
+
+typedef SIFT SiftFeatureDetector;
+typedef SIFT SiftDescriptorExtractor;
+
+/*!
+ SURF implementation.
+
+ The class implements SURF algorithm by H. Bay et al.
+ */
+class CV_EXPORTS_W SURF : public Feature2D
+{
+public:
+ //! the default constructor
+ CV_WRAP SURF();
+ //! the full constructor taking all the necessary parameters
+ explicit CV_WRAP SURF(double hessianThreshold,
+ int nOctaves=4, int nOctaveLayers=2,
+ bool extended=true, bool upright=false);
+
+ //! returns the descriptor size in float's (64 or 128)
+ CV_WRAP int descriptorSize() const;
+
+ //! returns the descriptor type
+ CV_WRAP int descriptorType() const;
+
+ //! finds the keypoints using fast hessian detector used in SURF
+ void operator()(InputArray img, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints) const;
+ //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
+ void operator()(InputArray img, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints,
+ OutputArray descriptors,
+ bool useProvidedKeypoints=false) const;
+
+ AlgorithmInfo* info() const;
+
+ CV_PROP_RW double hessianThreshold;
+ CV_PROP_RW int nOctaves;
+ CV_PROP_RW int nOctaveLayers;
+ CV_PROP_RW bool extended;
+ CV_PROP_RW bool upright;
+
+protected:
+
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
+ void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+};
+
+typedef SURF SurfFeatureDetector;
+typedef SURF SurfDescriptorExtractor;
+
+} /* namespace cv */
+
+#endif /* __cplusplus */
+
+#endif
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp
new file mode 100644
index 00000000..722ef26a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/gpu.hpp
@@ -0,0 +1,128 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_NONFREE_GPU_HPP__
+#define __OPENCV_NONFREE_GPU_HPP__
+
+#include "opencv2/core/gpumat.hpp"
+
+namespace cv { namespace gpu {
+
+class CV_EXPORTS SURF_GPU
+{
+public:
+ enum KeypointLayout
+ {
+ X_ROW = 0,
+ Y_ROW,
+ LAPLACIAN_ROW,
+ OCTAVE_ROW,
+ SIZE_ROW,
+ ANGLE_ROW,
+ HESSIAN_ROW,
+ ROWS_COUNT
+ };
+
+ //! the default constructor
+ SURF_GPU();
+ //! the full constructor taking all the necessary parameters
+ explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4,
+ int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
+
+ //! returns the descriptor size in float's (64 or 128)
+ int descriptorSize() const;
+
+ //! upload host keypoints to device memory
+ void uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
+ //! download keypoints from device to host memory
+ void downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints);
+
+ //! download descriptors from device to host memory
+ void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors);
+
+ //! finds the keypoints using fast hessian detector used in SURF
+ //! supports CV_8UC1 images
+ //! keypoints will have nFeature cols and 6 rows
+ //! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
+ //! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
+ //! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
+ //! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
+ //! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
+ //! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
+ //! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
+ void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
+ //! finds the keypoints and computes their descriptors.
+ //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
+ void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
+ bool useProvidedKeypoints = false);
+
+ void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
+ void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
+ bool useProvidedKeypoints = false);
+
+ void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
+ bool useProvidedKeypoints = false);
+
+ void releaseMemory();
+
+ // SURF parameters
+ double hessianThreshold;
+ int nOctaves;
+ int nOctaveLayers;
+ bool extended;
+ bool upright;
+
+ //! max keypoints = min(keypointsRatio * img.size().area(), 65535)
+ float keypointsRatio;
+
+ GpuMat sum, mask1, maskSum, intBuffer;
+
+ GpuMat det, trace;
+
+ GpuMat maxPosBuffer;
+};
+
+} // namespace gpu
+
+} // namespace cv
+
+#endif // __OPENCV_NONFREE_GPU_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp
new file mode 100644
index 00000000..c64c566d
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/nonfree.hpp
@@ -0,0 +1,57 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_NONFREE_HPP__
+#define __OPENCV_NONFREE_HPP__
+
+#include "opencv2/nonfree/features2d.hpp"
+
+namespace cv
+{
+
+CV_EXPORTS_W bool initModule_nonfree();
+
+}
+
+#endif
+
+/* End of file. */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp
new file mode 100644
index 00000000..ba84d244
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/nonfree/ocl.hpp
@@ -0,0 +1,140 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_NONFREE_OCL_HPP__
+#define __OPENCV_NONFREE_OCL_HPP__
+
+#include "opencv2/ocl/ocl.hpp"
+
+namespace cv
+{
+ namespace ocl
+ {
+ //! Speeded up robust features, port from GPU module.
+ ////////////////////////////////// SURF //////////////////////////////////////////
+
+ class CV_EXPORTS SURF_OCL : public cv::Feature2D
+ {
+ public:
+ enum KeypointLayout
+ {
+ X_ROW = 0,
+ Y_ROW,
+ LAPLACIAN_ROW,
+ OCTAVE_ROW,
+ SIZE_ROW,
+ ANGLE_ROW,
+ HESSIAN_ROW,
+ ROWS_COUNT
+ };
+
+ //! the default constructor
+ SURF_OCL();
+ //! the full constructor taking all the necessary parameters
+ explicit SURF_OCL(double _hessianThreshold, int _nOctaves = 4,
+ int _nOctaveLayers = 2, bool _extended = true, float _keypointsRatio = 0.01f, bool _upright = false);
+
+ //! returns the descriptor size in float's (64 or 128)
+ int descriptorSize() const;
+
+ int descriptorType() const;
+
+ //! upload host keypoints to device memory
+ void uploadKeypoints(const vector<cv::KeyPoint> &keypoints, oclMat &keypointsocl);
+ //! download keypoints from device to host memory
+ void downloadKeypoints(const oclMat &keypointsocl, vector<KeyPoint> &keypoints);
+ //! download descriptors from device to host memory
+ void downloadDescriptors(const oclMat &descriptorsocl, vector<float> &descriptors);
+ //! finds the keypoints using fast hessian detector used in SURF
+ //! supports CV_8UC1 images
+ //! keypoints will have nFeature cols and 6 rows
+ //! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
+ //! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
+ //! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
+ //! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
+ //! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
+ //! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
+ //! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
+ void operator()(const oclMat &img, const oclMat &mask, oclMat &keypoints);
+ //! finds the keypoints and computes their descriptors.
+ //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
+ void operator()(const oclMat &img, const oclMat &mask, oclMat &keypoints, oclMat &descriptors,
+ bool useProvidedKeypoints = false);
+ void operator()(const oclMat &img, const oclMat &mask, std::vector<KeyPoint> &keypoints);
+ void operator()(const oclMat &img, const oclMat &mask, std::vector<KeyPoint> &keypoints, oclMat &descriptors,
+ bool useProvidedKeypoints = false);
+ void operator()(const oclMat &img, const oclMat &mask, std::vector<KeyPoint> &keypoints, std::vector<float> &descriptors,
+ bool useProvidedKeypoints = false);
+
+ //! finds the keypoints using fast hessian detector used in SURF
+ void operator()(InputArray img, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints) const;
+ //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
+ void operator()(InputArray img, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints,
+ OutputArray descriptors,
+ bool useProvidedKeypoints=false) const;
+
+ AlgorithmInfo* info() const;
+
+ void releaseMemory();
+
+ // SURF parameters
+ float hessianThreshold;
+ int nOctaves;
+ int nOctaveLayers;
+ bool extended;
+ bool upright;
+ //! max keypoints = min(keypointsRatio * img.size().area(), 65535)
+ float keypointsRatio;
+ oclMat sum, mask1, maskSum, intBuffer;
+ oclMat det, trace;
+ oclMat maxPosBuffer;
+ protected:
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const;
+ void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const;
+ };
+ }
+}
+
+#endif //__OPENCV_NONFREE_OCL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp
new file mode 100644
index 00000000..71f201c9
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/objdetect/objdetect.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp
new file mode 100644
index 00000000..d5d6f0b2
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/objdetect/objdetect.hpp
@@ -0,0 +1,1073 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OBJDETECT_HPP__
+#define __OPENCV_OBJDETECT_HPP__
+
+#include "opencv2/core/core.hpp"
+
+#ifdef __cplusplus
+#include <map>
+#include <deque>
+
+extern "C" {
+#endif
+
+/****************************************************************************************\
+* Haar-like Object Detection functions *
+\****************************************************************************************/
+
+#define CV_HAAR_MAGIC_VAL 0x42500000
+#define CV_TYPE_NAME_HAAR "opencv-haar-classifier"
+
+#define CV_IS_HAAR_CLASSIFIER( haar ) \
+ ((haar) != NULL && \
+ (((const CvHaarClassifierCascade*)(haar))->flags & CV_MAGIC_MASK)==CV_HAAR_MAGIC_VAL)
+
+#define CV_HAAR_FEATURE_MAX 3
+
+typedef struct CvHaarFeature
+{
+ int tilted;
+ struct
+ {
+ CvRect r;
+ float weight;
+ } rect[CV_HAAR_FEATURE_MAX];
+} CvHaarFeature;
+
+typedef struct CvHaarClassifier
+{
+ int count;
+ CvHaarFeature* haar_feature;
+ float* threshold;
+ int* left;
+ int* right;
+ float* alpha;
+} CvHaarClassifier;
+
+typedef struct CvHaarStageClassifier
+{
+ int count;
+ float threshold;
+ CvHaarClassifier* classifier;
+
+ int next;
+ int child;
+ int parent;
+} CvHaarStageClassifier;
+
+typedef struct CvHidHaarClassifierCascade CvHidHaarClassifierCascade;
+
+typedef struct CvHaarClassifierCascade
+{
+ int flags;
+ int count;
+ CvSize orig_window_size;
+ CvSize real_window_size;
+ double scale;
+ CvHaarStageClassifier* stage_classifier;
+ CvHidHaarClassifierCascade* hid_cascade;
+} CvHaarClassifierCascade;
+
+typedef struct CvAvgComp
+{
+ CvRect rect;
+ int neighbors;
+} CvAvgComp;
+
+/* Loads haar classifier cascade from a directory.
+ It is obsolete: convert your cascade to xml and use cvLoad instead */
+CVAPI(CvHaarClassifierCascade*) cvLoadHaarClassifierCascade(
+ const char* directory, CvSize orig_window_size);
+
+CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade );
+
+#define CV_HAAR_DO_CANNY_PRUNING 1
+#define CV_HAAR_SCALE_IMAGE 2
+#define CV_HAAR_FIND_BIGGEST_OBJECT 4
+#define CV_HAAR_DO_ROUGH_SEARCH 8
+
+//CVAPI(CvSeq*) cvHaarDetectObjectsForROC( const CvArr* image,
+// CvHaarClassifierCascade* cascade, CvMemStorage* storage,
+// CvSeq** rejectLevels, CvSeq** levelWeightds,
+// double scale_factor CV_DEFAULT(1.1),
+// int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
+// CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)),
+// bool outputRejectLevels = false );
+
+
+CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image,
+ CvHaarClassifierCascade* cascade, CvMemStorage* storage,
+ double scale_factor CV_DEFAULT(1.1),
+ int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
+ CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)));
+
+/* sets images for haar classifier cascade */
+CVAPI(void) cvSetImagesForHaarClassifierCascade( CvHaarClassifierCascade* cascade,
+ const CvArr* sum, const CvArr* sqsum,
+ const CvArr* tilted_sum, double scale );
+
+/* runs the cascade on the specified window */
+CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade,
+ CvPoint pt, int start_stage CV_DEFAULT(0));
+
+
+/****************************************************************************************\
+* Latent SVM Object Detection functions *
+\****************************************************************************************/
+
+// DataType: STRUCT position
+// Structure describes the position of the filter in the feature pyramid
+// l - level in the feature pyramid
+// (x, y) - coordinate in level l
+typedef struct CvLSVMFilterPosition
+{
+ int x;
+ int y;
+ int l;
+} CvLSVMFilterPosition;
+
+// DataType: STRUCT filterObject
+// Description of the filter, which corresponds to the part of the object
+// V - ideal (penalty = 0) position of the partial filter
+// from the root filter position (V_i in the paper)
+// penaltyFunction - vector describes penalty function (d_i in the paper)
+// pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2
+// FILTER DESCRIPTION
+// Rectangular map (sizeX x sizeY),
+// every cell stores feature vector (dimension = p)
+// H - matrix of feature vectors
+// to set and get feature vectors (i,j)
+// used formula H[(j * sizeX + i) * p + k], where
+// k - component of feature vector in cell (i, j)
+// END OF FILTER DESCRIPTION
+typedef struct CvLSVMFilterObject{
+ CvLSVMFilterPosition V;
+ float fineFunction[4];
+ int sizeX;
+ int sizeY;
+ int numFeatures;
+ float *H;
+} CvLSVMFilterObject;
+
+// data type: STRUCT CvLatentSvmDetector
+// structure contains internal representation of trained Latent SVM detector
+// num_filters - total number of filters (root plus part) in model
+// num_components - number of components in model
+// num_part_filters - array containing number of part filters for each component
+// filters - root and part filters for all model components
+// b - biases for all model components
+// score_threshold - confidence level threshold
+typedef struct CvLatentSvmDetector
+{
+ int num_filters;
+ int num_components;
+ int* num_part_filters;
+ CvLSVMFilterObject** filters;
+ float* b;
+ float score_threshold;
+}
+CvLatentSvmDetector;
+
+// data type: STRUCT CvObjectDetection
+// structure contains the bounding box and confidence level for detected object
+// rect - bounding box for a detected object
+// score - confidence level
+typedef struct CvObjectDetection
+{
+ CvRect rect;
+ float score;
+} CvObjectDetection;
+
+//////////////// Object Detection using Latent SVM //////////////
+
+
+/*
+// load trained detector from a file
+//
+// API
+// CvLatentSvmDetector* cvLoadLatentSvmDetector(const char* filename);
+// INPUT
+// filename - path to the file containing the parameters of
+ - trained Latent SVM detector
+// OUTPUT
+// trained Latent SVM detector in internal representation
+*/
+CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename);
+
+/*
+// release memory allocated for CvLatentSvmDetector structure
+//
+// API
+// void cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);
+// INPUT
+// detector - CvLatentSvmDetector structure to be released
+// OUTPUT
+*/
+CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector);
+
+/*
+// find rectangular regions in the given image that are likely
+// to contain objects and corresponding confidence levels
+//
+// API
+// CvSeq* cvLatentSvmDetectObjects(const IplImage* image,
+// CvLatentSvmDetector* detector,
+// CvMemStorage* storage,
+// float overlap_threshold = 0.5f,
+// int numThreads = -1);
+// INPUT
+// image - image to detect objects in
+// detector - Latent SVM detector in internal representation
+// storage - memory storage to store the resultant sequence
+// of the object candidate rectangles
+// overlap_threshold - threshold for the non-maximum suppression algorithm
+ = 0.5f [here will be the reference to original paper]
+// OUTPUT
+// sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures)
+*/
+CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image,
+ CvLatentSvmDetector* detector,
+ CvMemStorage* storage,
+ float overlap_threshold CV_DEFAULT(0.5f),
+ int numThreads CV_DEFAULT(-1));
+
+#ifdef __cplusplus
+}
+
+CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image,
+ CvHaarClassifierCascade* cascade, CvMemStorage* storage,
+ std::vector<int>& rejectLevels, std::vector<double>& levelWeightds,
+ double scale_factor CV_DEFAULT(1.1),
+ int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0),
+ CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0)),
+ bool outputRejectLevels = false );
+
+namespace cv
+{
+
+///////////////////////////// Object Detection ////////////////////////////
+
+/*
+ * This is a class wrapping up the structure CvLatentSvmDetector and functions working with it.
+ * The class goals are:
+ * 1) provide c++ interface;
+ * 2) make it possible to load and detect more than one class (model) unlike CvLatentSvmDetector.
+ */
+class CV_EXPORTS LatentSvmDetector
+{
+public:
+ struct CV_EXPORTS ObjectDetection
+ {
+ ObjectDetection();
+ ObjectDetection( const Rect& rect, float score, int classID=-1 );
+ Rect rect;
+ float score;
+ int classID;
+ };
+
+ LatentSvmDetector();
+ LatentSvmDetector( const vector<string>& filenames, const vector<string>& classNames=vector<string>() );
+ virtual ~LatentSvmDetector();
+
+ virtual void clear();
+ virtual bool empty() const;
+ bool load( const vector<string>& filenames, const vector<string>& classNames=vector<string>() );
+
+ virtual void detect( const Mat& image,
+ vector<ObjectDetection>& objectDetections,
+ float overlapThreshold=0.5f,
+ int numThreads=-1 );
+
+ const vector<string>& getClassNames() const;
+ size_t getClassCount() const;
+
+private:
+ vector<CvLatentSvmDetector*> detectors;
+ vector<string> classNames;
+};
+
+// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
+// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
+class CV_EXPORTS SimilarRects
+{
+public:
+ SimilarRects(double _eps) : eps(_eps) {}
+ inline bool operator()(const Rect& r1, const Rect& r2) const
+ {
+ double delta = eps*(std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5;
+ return std::abs(r1.x - r2.x) <= delta &&
+ std::abs(r1.y - r2.y) <= delta &&
+ std::abs(r1.x + r1.width - r2.x - r2.width) <= delta &&
+ std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
+ }
+ double eps;
+};
+
+CV_EXPORTS void groupRectangles(CV_OUT CV_IN_OUT vector<Rect>& rectList, int groupThreshold, double eps=0.2);
+CV_EXPORTS_W void groupRectangles(CV_OUT CV_IN_OUT vector<Rect>& rectList, CV_OUT vector<int>& weights, int groupThreshold, double eps=0.2);
+CV_EXPORTS void groupRectangles( vector<Rect>& rectList, int groupThreshold, double eps, vector<int>* weights, vector<double>* levelWeights );
+CV_EXPORTS void groupRectangles(vector<Rect>& rectList, vector<int>& rejectLevels,
+ vector<double>& levelWeights, int groupThreshold, double eps=0.2);
+CV_EXPORTS void groupRectangles_meanshift(vector<Rect>& rectList, vector<double>& foundWeights, vector<double>& foundScales,
+ double detectThreshold = 0.0, Size winDetSize = Size(64, 128));
+
+
+class CV_EXPORTS FeatureEvaluator
+{
+public:
+ enum { HAAR = 0, LBP = 1, HOG = 2 };
+ virtual ~FeatureEvaluator();
+
+ virtual bool read(const FileNode& node);
+ virtual Ptr<FeatureEvaluator> clone() const;
+ virtual int getFeatureType() const;
+
+ virtual bool setImage(const Mat& img, Size origWinSize);
+ virtual bool setWindow(Point p);
+
+ virtual double calcOrd(int featureIdx) const;
+ virtual int calcCat(int featureIdx) const;
+
+ static Ptr<FeatureEvaluator> create(int type);
+};
+
+template<> CV_EXPORTS void Ptr<CvHaarClassifierCascade>::delete_obj();
+
+enum
+{
+ CASCADE_DO_CANNY_PRUNING=1,
+ CASCADE_SCALE_IMAGE=2,
+ CASCADE_FIND_BIGGEST_OBJECT=4,
+ CASCADE_DO_ROUGH_SEARCH=8
+};
+
+class CV_EXPORTS_W CascadeClassifier
+{
+public:
+ CV_WRAP CascadeClassifier();
+ CV_WRAP CascadeClassifier( const string& filename );
+ virtual ~CascadeClassifier();
+
+ CV_WRAP virtual bool empty() const;
+ CV_WRAP bool load( const string& filename );
+ virtual bool read( const FileNode& node );
+ CV_WRAP virtual void detectMultiScale( const Mat& image,
+ CV_OUT vector<Rect>& objects,
+ double scaleFactor=1.1,
+ int minNeighbors=3, int flags=0,
+ Size minSize=Size(),
+ Size maxSize=Size() );
+
+ CV_WRAP virtual void detectMultiScale( const Mat& image,
+ CV_OUT vector<Rect>& objects,
+ vector<int>& rejectLevels,
+ vector<double>& levelWeights,
+ double scaleFactor=1.1,
+ int minNeighbors=3, int flags=0,
+ Size minSize=Size(),
+ Size maxSize=Size(),
+ bool outputRejectLevels=false );
+
+
+ bool isOldFormatCascade() const;
+ virtual Size getOriginalWindowSize() const;
+ int getFeatureType() const;
+ bool setImage( const Mat& );
+
+protected:
+ //virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
+ // int stripSize, int yStep, double factor, vector<Rect>& candidates );
+
+ virtual bool detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
+ int stripSize, int yStep, double factor, vector<Rect>& candidates,
+ vector<int>& rejectLevels, vector<double>& levelWeights, bool outputRejectLevels=false);
+
+protected:
+ enum { BOOST = 0 };
+ enum { DO_CANNY_PRUNING = 1, SCALE_IMAGE = 2,
+ FIND_BIGGEST_OBJECT = 4, DO_ROUGH_SEARCH = 8 };
+
+ friend class CascadeClassifierInvoker;
+
+ template<class FEval>
+ friend int predictOrdered( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+ template<class FEval>
+ friend int predictCategorical( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+ template<class FEval>
+ friend int predictOrderedStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+ template<class FEval>
+ friend int predictCategoricalStump( CascadeClassifier& cascade, Ptr<FeatureEvaluator> &featureEvaluator, double& weight);
+
+ bool setImage( Ptr<FeatureEvaluator>& feval, const Mat& image);
+ virtual int runAt( Ptr<FeatureEvaluator>& feval, Point pt, double& weight );
+
+ class Data
+ {
+ public:
+ struct CV_EXPORTS DTreeNode
+ {
+ int featureIdx;
+ float threshold; // for ordered features only
+ int left;
+ int right;
+ };
+
+ struct CV_EXPORTS DTree
+ {
+ int nodeCount;
+ };
+
+ struct CV_EXPORTS Stage
+ {
+ int first;
+ int ntrees;
+ float threshold;
+ };
+
+ bool read(const FileNode &node);
+
+ bool isStumpBased;
+
+ int stageType;
+ int featureType;
+ int ncategories;
+ Size origWinSize;
+
+ vector<Stage> stages;
+ vector<DTree> classifiers;
+ vector<DTreeNode> nodes;
+ vector<float> leaves;
+ vector<int> subsets;
+ };
+
+ Data data;
+ Ptr<FeatureEvaluator> featureEvaluator;
+ Ptr<CvHaarClassifierCascade> oldCascade;
+
+public:
+ class CV_EXPORTS MaskGenerator
+ {
+ public:
+ virtual ~MaskGenerator() {}
+ virtual cv::Mat generateMask(const cv::Mat& src)=0;
+ virtual void initializeMask(const cv::Mat& /*src*/) {};
+ };
+ void setMaskGenerator(Ptr<MaskGenerator> maskGenerator);
+ Ptr<MaskGenerator> getMaskGenerator();
+
+ void setFaceDetectionMaskGenerator();
+
+protected:
+ Ptr<MaskGenerator> maskGenerator;
+};
+
+
+//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
+
+// struct for detection region of interest (ROI)
+struct DetectionROI
+{
+ // scale(size) of the bounding box
+ double scale;
+ // set of requested locations to be evaluated
+ vector<cv::Point> locations;
+ // vector that will contain confidence values for each location
+ vector<double> confidences;
+};
+
+struct CV_EXPORTS_W HOGDescriptor
+{
+public:
+ enum { L2Hys=0 };
+ enum { DEFAULT_NLEVELS=64 };
+
+ CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
+ cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
+ histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true),
+ nlevels(HOGDescriptor::DEFAULT_NLEVELS)
+ {}
+
+ CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride,
+ Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1,
+ int _histogramNormType=HOGDescriptor::L2Hys,
+ double _L2HysThreshold=0.2, bool _gammaCorrection=false,
+ int _nlevels=HOGDescriptor::DEFAULT_NLEVELS)
+ : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize),
+ nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma),
+ histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold),
+ gammaCorrection(_gammaCorrection), nlevels(_nlevels)
+ {}
+
+ CV_WRAP HOGDescriptor(const String& filename)
+ {
+ load(filename);
+ }
+
+ HOGDescriptor(const HOGDescriptor& d)
+ {
+ d.copyTo(*this);
+ }
+
+ virtual ~HOGDescriptor() {}
+
+ CV_WRAP size_t getDescriptorSize() const;
+ CV_WRAP bool checkDetectorSize() const;
+ CV_WRAP double getWinSigma() const;
+
+ CV_WRAP virtual void setSVMDetector(InputArray _svmdetector);
+
+ virtual bool read(FileNode& fn);
+ virtual void write(FileStorage& fs, const String& objname) const;
+
+ CV_WRAP virtual bool load(const String& filename, const String& objname=String());
+ CV_WRAP virtual void save(const String& filename, const String& objname=String()) const;
+ virtual void copyTo(HOGDescriptor& c) const;
+
+ CV_WRAP virtual void compute(const Mat& img,
+ CV_OUT vector<float>& descriptors,
+ Size winStride=Size(), Size padding=Size(),
+ const vector<Point>& locations=vector<Point>()) const;
+ //with found weights output
+ CV_WRAP virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
+ CV_OUT vector<double>& weights,
+ double hitThreshold=0, Size winStride=Size(),
+ Size padding=Size(),
+ const vector<Point>& searchLocations=vector<Point>()) const;
+ //without found weights output
+ virtual void detect(const Mat& img, CV_OUT vector<Point>& foundLocations,
+ double hitThreshold=0, Size winStride=Size(),
+ Size padding=Size(),
+ const vector<Point>& searchLocations=vector<Point>()) const;
+ //with result weights output
+ CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
+ CV_OUT vector<double>& foundWeights, double hitThreshold=0,
+ Size winStride=Size(), Size padding=Size(), double scale=1.05,
+ double finalThreshold=2.0,bool useMeanshiftGrouping = false) const;
+ //without found weights output
+ virtual void detectMultiScale(const Mat& img, CV_OUT vector<Rect>& foundLocations,
+ double hitThreshold=0, Size winStride=Size(),
+ Size padding=Size(), double scale=1.05,
+ double finalThreshold=2.0, bool useMeanshiftGrouping = false) const;
+
+ CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs,
+ Size paddingTL=Size(), Size paddingBR=Size()) const;
+
+ CV_WRAP static vector<float> getDefaultPeopleDetector();
+ CV_WRAP static vector<float> getDaimlerPeopleDetector();
+
+ CV_PROP Size winSize;
+ CV_PROP Size blockSize;
+ CV_PROP Size blockStride;
+ CV_PROP Size cellSize;
+ CV_PROP int nbins;
+ CV_PROP int derivAperture;
+ CV_PROP double winSigma;
+ CV_PROP int histogramNormType;
+ CV_PROP double L2HysThreshold;
+ CV_PROP bool gammaCorrection;
+ CV_PROP vector<float> svmDetector;
+ CV_PROP int nlevels;
+
+
+ // evaluate specified ROI and return confidence value for each location
+ void detectROI(const cv::Mat& img, const vector<cv::Point> &locations,
+ CV_OUT std::vector<cv::Point>& foundLocations, CV_OUT std::vector<double>& confidences,
+ double hitThreshold = 0, cv::Size winStride = Size(),
+ cv::Size padding = Size()) const;
+
+ // evaluate specified ROI and return confidence value for each location in multiple scales
+ void detectMultiScaleROI(const cv::Mat& img,
+ CV_OUT std::vector<cv::Rect>& foundLocations,
+ std::vector<DetectionROI>& locations,
+ double hitThreshold = 0,
+ int groupThreshold = 0) const;
+
+ // read/parse Dalal's alt model file
+ void readALTModel(std::string modelfile);
+ void groupRectangles(vector<cv::Rect>& rectList, vector<double>& weights, int groupThreshold, double eps) const;
+};
+
+
+CV_EXPORTS_W void findDataMatrix(InputArray image,
+ CV_OUT vector<string>& codes,
+ OutputArray corners=noArray(),
+ OutputArrayOfArrays dmtx=noArray());
+CV_EXPORTS_W void drawDataMatrixCodes(InputOutputArray image,
+ const vector<string>& codes,
+ InputArray corners);
+}
+
+/****************************************************************************************\
+* Datamatrix *
+\****************************************************************************************/
+
+struct CV_EXPORTS CvDataMatrixCode {
+ char msg[4];
+ CvMat *original;
+ CvMat *corners;
+};
+
+CV_EXPORTS std::deque<CvDataMatrixCode> cvFindDataMatrix(CvMat *im);
+
+/****************************************************************************************\
+* LINE-MOD *
+\****************************************************************************************/
+
+namespace cv {
+namespace linemod {
+
+using cv::FileNode;
+using cv::FileStorage;
+using cv::Mat;
+using cv::noArray;
+using cv::OutputArrayOfArrays;
+using cv::Point;
+using cv::Ptr;
+using cv::Rect;
+using cv::Size;
+
+/// @todo Convert doxy comments to rst
+
+/**
+ * \brief Discriminant feature described by its location and label.
+ */
+struct CV_EXPORTS Feature
+{
+ int x; ///< x offset
+ int y; ///< y offset
+ int label; ///< Quantization
+
+ Feature() : x(0), y(0), label(0) {}
+ Feature(int x, int y, int label);
+
+ void read(const FileNode& fn);
+ void write(FileStorage& fs) const;
+};
+
+inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {}
+
+struct CV_EXPORTS Template
+{
+ int width;
+ int height;
+ int pyramid_level;
+ std::vector<Feature> features;
+
+ void read(const FileNode& fn);
+ void write(FileStorage& fs) const;
+};
+
+/**
+ * \brief Represents a modality operating over an image pyramid.
+ */
+class QuantizedPyramid
+{
+public:
+ // Virtual destructor
+ virtual ~QuantizedPyramid() {}
+
+ /**
+ * \brief Compute quantized image at current pyramid level for online detection.
+ *
+ * \param[out] dst The destination 8-bit image. For each pixel at most one bit is set,
+ * representing its classification.
+ */
+ virtual void quantize(Mat& dst) const =0;
+
+ /**
+ * \brief Extract most discriminant features at current pyramid level to form a new template.
+ *
+ * \param[out] templ The new template.
+ */
+ virtual bool extractTemplate(Template& templ) const =0;
+
+ /**
+ * \brief Go to the next pyramid level.
+ *
+ * \todo Allow pyramid scale factor other than 2
+ */
+ virtual void pyrDown() =0;
+
+protected:
+ /// Candidate feature with a score
+ struct Candidate
+ {
+ Candidate(int x, int y, int label, float score);
+
+ /// Sort candidates with high score to the front
+ bool operator<(const Candidate& rhs) const
+ {
+ return score > rhs.score;
+ }
+
+ Feature f;
+ float score;
+ };
+
+ /**
+ * \brief Choose candidate features so that they are not bunched together.
+ *
+ * \param[in] candidates Candidate features sorted by score.
+ * \param[out] features Destination vector of selected features.
+ * \param[in] num_features Number of candidates to select.
+ * \param[in] distance Hint for desired distance between features.
+ */
+ static void selectScatteredFeatures(const std::vector<Candidate>& candidates,
+ std::vector<Feature>& features,
+ size_t num_features, float distance);
+};
+
+inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {}
+
+/**
+ * \brief Interface for modalities that plug into the LINE template matching representation.
+ *
+ * \todo Max response, to allow optimization of summing (255/MAX) features as uint8
+ */
+class CV_EXPORTS Modality
+{
+public:
+ // Virtual destructor
+ virtual ~Modality() {}
+
+ /**
+ * \brief Form a quantized image pyramid from a source image.
+ *
+ * \param[in] src The source image. Type depends on the modality.
+ * \param[in] mask Optional mask. If not empty, unmasked pixels are set to zero
+ * in quantized image and cannot be extracted as features.
+ */
+ Ptr<QuantizedPyramid> process(const Mat& src,
+ const Mat& mask = Mat()) const
+ {
+ return processImpl(src, mask);
+ }
+
+ virtual std::string name() const =0;
+
+ virtual void read(const FileNode& fn) =0;
+ virtual void write(FileStorage& fs) const =0;
+
+ /**
+ * \brief Create modality by name.
+ *
+ * The following modality types are supported:
+ * - "ColorGradient"
+ * - "DepthNormal"
+ */
+ static Ptr<Modality> create(const std::string& modality_type);
+
+ /**
+ * \brief Load a modality from file.
+ */
+ static Ptr<Modality> create(const FileNode& fn);
+
+protected:
+ // Indirection is because process() has a default parameter.
+ virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
+ const Mat& mask) const =0;
+};
+
+/**
+ * \brief Modality that computes quantized gradient orientations from a color image.
+ */
+class CV_EXPORTS ColorGradient : public Modality
+{
+public:
+ /**
+ * \brief Default constructor. Uses reasonable default parameter values.
+ */
+ ColorGradient();
+
+ /**
+ * \brief Constructor.
+ *
+ * \param weak_threshold When quantizing, discard gradients with magnitude less than this.
+ * \param num_features How many features a template must contain.
+ * \param strong_threshold Consider as candidate features only gradients whose norms are
+ * larger than this.
+ */
+ ColorGradient(float weak_threshold, size_t num_features, float strong_threshold);
+
+ virtual std::string name() const;
+
+ virtual void read(const FileNode& fn);
+ virtual void write(FileStorage& fs) const;
+
+ float weak_threshold;
+ size_t num_features;
+ float strong_threshold;
+
+protected:
+ virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
+ const Mat& mask) const;
+};
+
+/**
+ * \brief Modality that computes quantized surface normals from a dense depth map.
+ */
+class CV_EXPORTS DepthNormal : public Modality
+{
+public:
+ /**
+ * \brief Default constructor. Uses reasonable default parameter values.
+ */
+ DepthNormal();
+
+ /**
+ * \brief Constructor.
+ *
+ * \param distance_threshold Ignore pixels beyond this distance.
+ * \param difference_threshold When computing normals, ignore contributions of pixels whose
+ * depth difference with the central pixel is above this threshold.
+ * \param num_features How many features a template must contain.
+ * \param extract_threshold Consider as candidate feature only if there are no differing
+ * orientations within a distance of extract_threshold.
+ */
+ DepthNormal(int distance_threshold, int difference_threshold, size_t num_features,
+ int extract_threshold);
+
+ virtual std::string name() const;
+
+ virtual void read(const FileNode& fn);
+ virtual void write(FileStorage& fs) const;
+
+ int distance_threshold;
+ int difference_threshold;
+ size_t num_features;
+ int extract_threshold;
+
+protected:
+ virtual Ptr<QuantizedPyramid> processImpl(const Mat& src,
+ const Mat& mask) const;
+};
+
+/**
+ * \brief Debug function to colormap a quantized image for viewing.
+ */
+void colormap(const Mat& quantized, Mat& dst);
+
+/**
+ * \brief Represents a successful template match.
+ */
+struct CV_EXPORTS Match
+{
+ Match()
+ {
+ }
+
+ Match(int x, int y, float similarity, const std::string& class_id, int template_id);
+
+ /// Sort matches with high similarity to the front
+ bool operator<(const Match& rhs) const
+ {
+ // Secondarily sort on template_id for the sake of duplicate removal
+ if (similarity != rhs.similarity)
+ return similarity > rhs.similarity;
+ else
+ return template_id < rhs.template_id;
+ }
+
+ bool operator==(const Match& rhs) const
+ {
+ return x == rhs.x && y == rhs.y && similarity == rhs.similarity && class_id == rhs.class_id;
+ }
+
+ int x;
+ int y;
+ float similarity;
+ std::string class_id;
+ int template_id;
+};
+
+inline Match::Match(int _x, int _y, float _similarity, const std::string& _class_id, int _template_id)
+ : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id)
+ {
+ }
+
+/**
+ * \brief Object detector using the LINE template matching algorithm with any set of
+ * modalities.
+ */
+class CV_EXPORTS Detector
+{
+public:
+ /**
+ * \brief Empty constructor, initialize with read().
+ */
+ Detector();
+
+ /**
+ * \brief Constructor.
+ *
+ * \param modalities Modalities to use (color gradients, depth normals, ...).
+ * \param T_pyramid Value of the sampling step T at each pyramid level. The
+ * number of pyramid levels is T_pyramid.size().
+ */
+ Detector(const std::vector< Ptr<Modality> >& modalities, const std::vector<int>& T_pyramid);
+
+ /**
+ * \brief Detect objects by template matching.
+ *
+ * Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.
+ *
+ * \param sources Source images, one for each modality.
+ * \param threshold Similarity threshold, a percentage between 0 and 100.
+ * \param[out] matches Template matches, sorted by similarity score.
+ * \param class_ids If non-empty, only search for the desired object classes.
+ * \param[out] quantized_images Optionally return vector<Mat> of quantized images.
+ * \param masks The masks for consideration during matching. The masks should be CV_8UC1
+ * where 255 represents a valid pixel. If non-empty, the vector must be
+ * the same size as sources. Each element must be
+ * empty or the same size as its corresponding source.
+ */
+ void match(const std::vector<Mat>& sources, float threshold, std::vector<Match>& matches,
+ const std::vector<std::string>& class_ids = std::vector<std::string>(),
+ OutputArrayOfArrays quantized_images = noArray(),
+ const std::vector<Mat>& masks = std::vector<Mat>()) const;
+
+ /**
+ * \brief Add new object template.
+ *
+ * \param sources Source images, one for each modality.
+ * \param class_id Object class ID.
+ * \param object_mask Mask separating object from background.
+ * \param[out] bounding_box Optionally return bounding box of the extracted features.
+ *
+ * \return Template ID, or -1 if failed to extract a valid template.
+ */
+ int addTemplate(const std::vector<Mat>& sources, const std::string& class_id,
+ const Mat& object_mask, Rect* bounding_box = NULL);
+
+ /**
+ * \brief Add a new object template computed by external means.
+ */
+ int addSyntheticTemplate(const std::vector<Template>& templates, const std::string& class_id);
+
+ /**
+ * \brief Get the modalities used by this detector.
+ *
+ * You are not permitted to add/remove modalities, but you may dynamic_cast them to
+ * tweak parameters.
+ */
+ const std::vector< Ptr<Modality> >& getModalities() const { return modalities; }
+
+ /**
+ * \brief Get sampling step T at pyramid_level.
+ */
+ int getT(int pyramid_level) const { return T_at_level[pyramid_level]; }
+
+ /**
+ * \brief Get number of pyramid levels used by this detector.
+ */
+ int pyramidLevels() const { return pyramid_levels; }
+
+ /**
+ * \brief Get the template pyramid identified by template_id.
+ *
+ * For example, with 2 modalities (Gradient, Normal) and two pyramid levels
+ * (L0, L1), the order is (GradientL0, NormalL0, GradientL1, NormalL1).
+ */
+ const std::vector<Template>& getTemplates(const std::string& class_id, int template_id) const;
+
+ int numTemplates() const;
+ int numTemplates(const std::string& class_id) const;
+ int numClasses() const { return static_cast<int>(class_templates.size()); }
+
+ std::vector<std::string> classIds() const;
+
+ void read(const FileNode& fn);
+ void write(FileStorage& fs) const;
+
+ std::string readClass(const FileNode& fn, const std::string &class_id_override = "");
+ void writeClass(const std::string& class_id, FileStorage& fs) const;
+
+ void readClasses(const std::vector<std::string>& class_ids,
+ const std::string& format = "templates_%s.yml.gz");
+ void writeClasses(const std::string& format = "templates_%s.yml.gz") const;
+
+protected:
+ std::vector< Ptr<Modality> > modalities;
+ int pyramid_levels;
+ std::vector<int> T_at_level;
+
+ typedef std::vector<Template> TemplatePyramid;
+ typedef std::map<std::string, std::vector<TemplatePyramid> > TemplatesMap;
+ TemplatesMap class_templates;
+
+ typedef std::vector<Mat> LinearMemories;
+ // Indexed as [pyramid level][modality][quantized label]
+ typedef std::vector< std::vector<LinearMemories> > LinearMemoryPyramid;
+
+ void matchClass(const LinearMemoryPyramid& lm_pyramid,
+ const std::vector<Size>& sizes,
+ float threshold, std::vector<Match>& matches,
+ const std::string& class_id,
+ const std::vector<TemplatePyramid>& template_pyramids) const;
+};
+
+/**
+ * \brief Factory function for detector using LINE algorithm with color gradients.
+ *
+ * Default parameter settings suitable for VGA images.
+ */
+CV_EXPORTS Ptr<Detector> getDefaultLINE();
+
+/**
+ * \brief Factory function for detector using LINE-MOD algorithm with color gradients
+ * and depth normals.
+ *
+ * Default parameter settings suitable for VGA images.
+ */
+CV_EXPORTS Ptr<Detector> getDefaultLINEMOD();
+
+} // namespace linemod
+} // namespace cv
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/matrix_operations.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/matrix_operations.hpp
new file mode 100644
index 00000000..d6f3bb4c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/matrix_operations.hpp
@@ -0,0 +1,490 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
+// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
+// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OCL_MATRIX_OPERATIONS_HPP__
+#define __OPENCV_OCL_MATRIX_OPERATIONS_HPP__
+
+#include "opencv2/ocl/ocl.hpp"
+
+namespace cv
+{
+
+ namespace ocl
+ {
+
+ enum
+ {
+ MAT_ADD = 1,
+ MAT_SUB,
+ MAT_MUL,
+ MAT_DIV,
+ MAT_NOT,
+ MAT_AND,
+ MAT_OR,
+ MAT_XOR
+ };
+
+ class CV_EXPORTS oclMatExpr
+ {
+ public:
+ oclMatExpr() : a(oclMat()), b(oclMat()), op(0) {}
+ oclMatExpr(const oclMat& _a, const oclMat& _b, int _op)
+ : a(_a), b(_b), op(_op) {}
+ operator oclMat() const;
+ void assign(oclMat& m) const;
+
+ protected:
+ oclMat a, b;
+ int op;
+ };
+ ////////////////////////////////////////////////////////////////////////
+ //////////////////////////////// oclMat ////////////////////////////////
+ ////////////////////////////////////////////////////////////////////////
+
+ inline oclMat::oclMat() : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), offset(0), wholerows(0), wholecols(0) {}
+
+ inline oclMat::oclMat(int _rows, int _cols, int _type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), offset(0), wholerows(0), wholecols(0)
+ {
+ if( _rows > 0 && _cols > 0 )
+ create( _rows, _cols, _type );
+ }
+
+ inline oclMat::oclMat(Size _size, int _type) : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), offset(0), wholerows(0), wholecols(0)
+ {
+ if( _size.height > 0 && _size.width > 0 )
+ create( _size.height, _size.width, _type );
+ }
+
+ inline oclMat::oclMat(int _rows, int _cols, int _type, const Scalar &_s)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), offset(0), wholerows(0), wholecols(0)
+ {
+ if(_rows > 0 && _cols > 0)
+ {
+ create(_rows, _cols, _type);
+ *this = _s;
+ }
+ }
+
+ inline oclMat::oclMat(Size _size, int _type, const Scalar &_s)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0), offset(0), wholerows(0), wholecols(0)
+ {
+ if( _size.height > 0 && _size.width > 0 )
+ {
+ create( _size.height, _size.width, _type );
+ *this = _s;
+ }
+ }
+
+ inline oclMat::oclMat(const oclMat &m)
+ : flags(m.flags), rows(m.rows), cols(m.cols), step(m.step), data(m.data),
+ refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), clCxt(m.clCxt), offset(m.offset), wholerows(m.wholerows), wholecols(m.wholecols)
+ {
+ if( refcount )
+ CV_XADD(refcount, 1);
+ }
+
+ inline oclMat::oclMat(int _rows, int _cols, int _type, void *_data, size_t _step)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0),
+ datastart(0), dataend(0), offset(0), wholerows(0), wholecols(0)
+ {
+ cv::Mat m(_rows, _cols, _type, _data, _step);
+ upload(m);
+ //size_t minstep = cols * elemSize();
+ //if( step == Mat::AUTO_STEP )
+ //{
+ // step = minstep;
+ // flags |= Mat::CONTINUOUS_FLAG;
+ //}
+ //else
+ //{
+ // if( rows == 1 ) step = minstep;
+ // CV_DbgAssert( step >= minstep );
+ // flags |= step == minstep ? Mat::CONTINUOUS_FLAG : 0;
+ //}
+ //dataend += step * (rows - 1) + minstep;
+ }
+
+ inline oclMat::oclMat(Size _size, int _type, void *_data, size_t _step)
+ : flags(0), rows(0), cols(0),
+ step(0), data(0), refcount(0),
+ datastart(0), dataend(0), offset(0), wholerows(0), wholecols(0)
+ {
+ cv::Mat m(_size, _type, _data, _step);
+ upload(m);
+ //size_t minstep = cols * elemSize();
+ //if( step == Mat::AUTO_STEP )
+ //{
+ // step = minstep;
+ // flags |= Mat::CONTINUOUS_FLAG;
+ //}
+ //else
+ //{
+ // if( rows == 1 ) step = minstep;
+ // CV_DbgAssert( step >= minstep );
+ // flags |= step == minstep ? Mat::CONTINUOUS_FLAG : 0;
+ //}
+ //dataend += step * (rows - 1) + minstep;
+ }
+
+
+ inline oclMat::oclMat(const oclMat &m, const Range &rRange, const Range &cRange)
+ {
+ flags = m.flags;
+ step = m.step;
+ refcount = m.refcount;
+ data = m.data;
+ datastart = m.datastart;
+ dataend = m.dataend;
+ clCxt = m.clCxt;
+ wholerows = m.wholerows;
+ wholecols = m.wholecols;
+ offset = m.offset;
+ if( rRange == Range::all() )
+ rows = m.rows;
+ else
+ {
+ CV_Assert( 0 <= rRange.start && rRange.start <= rRange.end && rRange.end <= m.rows );
+ rows = rRange.size();
+ offset += step * rRange.start;
+ }
+
+ if( cRange == Range::all() )
+ cols = m.cols;
+ else
+ {
+ CV_Assert( 0 <= cRange.start && cRange.start <= cRange.end && cRange.end <= m.cols );
+ cols = cRange.size();
+ offset += cRange.start * elemSize();
+ flags &= cols < m.cols ? ~Mat::CONTINUOUS_FLAG : -1;
+ }
+
+ if( rows == 1 )
+ flags |= Mat::CONTINUOUS_FLAG;
+
+ if( refcount )
+ CV_XADD(refcount, 1);
+ if( rows <= 0 || cols <= 0 )
+ rows = cols = 0;
+ }
+
+ inline oclMat::oclMat(const oclMat &m, const Rect &roi)
+ : flags(m.flags), rows(roi.height), cols(roi.width),
+ step(m.step), data(m.data), refcount(m.refcount),
+ datastart(m.datastart), dataend(m.dataend), clCxt(m.clCxt), offset(m.offset), wholerows(m.wholerows), wholecols(m.wholecols)
+ {
+ flags &= roi.width < m.cols ? ~Mat::CONTINUOUS_FLAG : -1;
+ offset += roi.y * step + roi.x * elemSize();
+ CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.wholecols &&
+ 0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.wholerows );
+ if( refcount )
+ CV_XADD(refcount, 1);
+ if( rows <= 0 || cols <= 0 )
+ rows = cols = 0;
+ }
+
+ inline oclMat::oclMat(const Mat &m)
+ : flags(0), rows(0), cols(0), step(0), data(0), refcount(0), datastart(0), dataend(0) , offset(0), wholerows(0), wholecols(0)
+ {
+ //clCxt = Context::getContext();
+ upload(m);
+ }
+
+ inline oclMat::~oclMat()
+ {
+ release();
+ }
+
+ inline oclMat &oclMat::operator = (const oclMat &m)
+ {
+ if( this != &m )
+ {
+ if( m.refcount )
+ CV_XADD(m.refcount, 1);
+ release();
+ clCxt = m.clCxt;
+ flags = m.flags;
+ rows = m.rows;
+ cols = m.cols;
+ step = m.step;
+ data = m.data;
+ datastart = m.datastart;
+ dataend = m.dataend;
+ offset = m.offset;
+ wholerows = m.wholerows;
+ wholecols = m.wholecols;
+ refcount = m.refcount;
+ }
+ return *this;
+ }
+
+ inline oclMat &oclMat::operator = (const Mat &m)
+ {
+ //clCxt = Context::getContext();
+ upload(m);
+ return *this;
+ }
+
+ inline oclMat& oclMat::operator = (const oclMatExpr& expr)
+ {
+ expr.assign(*this);
+ return *this;
+ }
+
+ /* Fixme! To be supported in OpenCL later. */
+#if 0
+ template <class T> inline oclMat::operator DevMem2D_<T>() const
+ {
+ return DevMem2D_<T>(rows, cols, (T *)data, step);
+ }
+ template <class T> inline oclMat::operator PtrStep_<T>() const
+ {
+ return PtrStep_<T>(static_cast< DevMem2D_<T> >(*this));
+ }
+#endif
+
+ //CPP: void oclMat::upload(const Mat& m);
+
+ inline oclMat::operator Mat() const
+ {
+ Mat m;
+ download(m);
+ return m;
+ }
+
+ //CPP void oclMat::download(cv::Mat& m) const;
+
+ inline oclMat oclMat::row(int y) const
+ {
+ return oclMat(*this, Range(y, y + 1), Range::all());
+ }
+ inline oclMat oclMat::col(int x) const
+ {
+ return oclMat(*this, Range::all(), Range(x, x + 1));
+ }
+ inline oclMat oclMat::rowRange(int startrow, int endrow) const
+ {
+ return oclMat(*this, Range(startrow, endrow), Range::all());
+ }
+ inline oclMat oclMat::rowRange(const Range &r) const
+ {
+ return oclMat(*this, r, Range::all());
+ }
+ inline oclMat oclMat::colRange(int startcol, int endcol) const
+ {
+ return oclMat(*this, Range::all(), Range(startcol, endcol));
+ }
+ inline oclMat oclMat::colRange(const Range &r) const
+ {
+ return oclMat(*this, Range::all(), r);
+ }
+
+ inline oclMat oclMat::clone() const
+ {
+ oclMat m;
+ copyTo(m);
+ return m;
+ }
+
+ //CPP void oclMat::copyTo( oclMat& m ) const;
+ //CPP void oclMat::copyTo( oclMat& m, const oclMat& mask ) const;
+ //CPP void oclMat::convertTo( oclMat& m, int rtype, double alpha=1, double beta=0 ) const;
+
+ inline void oclMat::assignTo( oclMat &m, int mtype ) const
+ {
+ if( mtype < 0 )
+ m = *this;
+ else
+ convertTo(m, mtype);
+ }
+
+ //CPP oclMat& oclMat::operator = (const Scalar& s);
+ //CPP oclMat& oclMat::setTo(const Scalar& s, const oclMat& mask=oclMat());
+ //CPP oclMat oclMat::reshape(int _cn, int _rows=0) const;
+ inline void oclMat::create(Size _size, int _type)
+ {
+ create(_size.height, _size.width, _type);
+ }
+ //CPP void oclMat::create(int _rows, int _cols, int _type);
+ //CPP void oclMat::release();
+
+ inline void oclMat::swap(oclMat &b)
+ {
+ std::swap( flags, b.flags );
+ std::swap( rows, b.rows );
+ std::swap( cols, b.cols );
+ std::swap( step, b.step );
+ std::swap( data, b.data );
+ std::swap( datastart, b.datastart );
+ std::swap( dataend, b.dataend );
+ std::swap( refcount, b.refcount );
+ std::swap( offset, b.offset );
+ std::swap( clCxt, b.clCxt );
+ std::swap( wholerows, b.wholerows );
+ std::swap( wholecols, b.wholecols );
+ }
+
+ inline void oclMat::locateROI( Size &wholeSize, Point &ofs ) const
+ {
+ size_t esz = elemSize();//, minstep;
+ //ptrdiff_t delta1 = offset;//, delta2 = dataend - datastart;
+ CV_DbgAssert( step > 0 );
+ if( offset == 0 )
+ ofs.x = ofs.y = 0;
+ else
+ {
+ ofs.y = (int)(offset / step);
+ ofs.x = (int)((offset - step * ofs.y) / esz);
+ //CV_DbgAssert( data == datastart + ofs.y*step + ofs.x*esz );
+ }
+ //minstep = (ofs.x + cols)*esz;
+ //wholeSize.height = (int)((delta2 - minstep)/step + 1);
+ //wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
+ //wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
+ //wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
+ wholeSize.height = wholerows;
+ wholeSize.width = wholecols;
+ }
+
+ inline oclMat &oclMat::adjustROI( int dtop, int dbottom, int dleft, int dright )
+ {
+ Size wholeSize;
+ Point ofs;
+ size_t esz = elemSize();
+ locateROI( wholeSize, ofs );
+ int row1 = std::max(ofs.y - dtop, 0), row2 = std::min(ofs.y + rows + dbottom, wholeSize.height);
+ int col1 = std::max(ofs.x - dleft, 0), col2 = std::min(ofs.x + cols + dright, wholeSize.width);
+ offset += (row1 - ofs.y) * step + (col1 - ofs.x) * esz;
+ rows = row2 - row1;
+ cols = col2 - col1;
+ if( esz * cols == step || rows == 1 )
+ flags |= Mat::CONTINUOUS_FLAG;
+ else
+ flags &= ~Mat::CONTINUOUS_FLAG;
+ return *this;
+ }
+
+ inline oclMat oclMat::operator()( Range rRange, Range cRange ) const
+ {
+ return oclMat(*this, rRange, cRange);
+ }
+ inline oclMat oclMat::operator()( const Rect &roi ) const
+ {
+ return oclMat(*this, roi);
+ }
+
+ inline bool oclMat::isContinuous() const
+ {
+ return (flags & Mat::CONTINUOUS_FLAG) != 0;
+ }
+ inline size_t oclMat::elemSize() const
+ {
+ return CV_ELEM_SIZE((CV_MAKE_TYPE(type(), oclchannels())));
+ }
+ inline size_t oclMat::elemSize1() const
+ {
+ return CV_ELEM_SIZE1(flags);
+ }
+ inline int oclMat::type() const
+ {
+ return CV_MAT_TYPE(flags);
+ }
+ inline int oclMat::ocltype() const
+ {
+ return CV_MAKE_TYPE(depth(), oclchannels());
+ }
+ inline int oclMat::depth() const
+ {
+ return CV_MAT_DEPTH(flags);
+ }
+ inline int oclMat::channels() const
+ {
+ return CV_MAT_CN(flags);
+ }
+ inline int oclMat::oclchannels() const
+ {
+ return (CV_MAT_CN(flags)) == 3 ? 4 : (CV_MAT_CN(flags));
+ }
+ inline size_t oclMat::step1() const
+ {
+ return step / elemSize1();
+ }
+ inline Size oclMat::size() const
+ {
+ return Size(cols, rows);
+ }
+ inline bool oclMat::empty() const
+ {
+ return data == 0;
+ }
+
+ inline oclMat oclMat::t() const
+ {
+ oclMat tmp;
+ transpose(*this, tmp);
+ return tmp;
+ }
+
+ static inline void swap( oclMat &a, oclMat &b )
+ {
+ a.swap(b);
+ }
+
+ inline void ensureSizeIsEnough(int rows, int cols, int type, oclMat &m)
+ {
+ if (m.type() == type && m.rows >= rows && m.cols >= cols)
+ m = m(Rect(0, 0, cols, rows));
+ else
+ m.create(rows, cols, type);
+ }
+
+ inline void ensureSizeIsEnough(Size size, int type, oclMat &m)
+ {
+ ensureSizeIsEnough(size.height, size.width, type, m);
+ }
+
+
+ } /* end of namespace ocl */
+
+} /* end of namespace cv */
+
+#endif /* __OPENCV_OCL_MATRIX_OPERATIONS_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/ocl.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/ocl.hpp
new file mode 100644
index 00000000..e8eb3e85
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ocl/ocl.hpp
@@ -0,0 +1,1998 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
+// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
+// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OCL_HPP__
+#define __OPENCV_OCL_HPP__
+
+#include <memory>
+#include <vector>
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/objdetect/objdetect.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/ml/ml.hpp"
+
+namespace cv
+{
+ namespace ocl
+ {
+ enum DeviceType
+ {
+ CVCL_DEVICE_TYPE_DEFAULT = (1 << 0),
+ CVCL_DEVICE_TYPE_CPU = (1 << 1),
+ CVCL_DEVICE_TYPE_GPU = (1 << 2),
+ CVCL_DEVICE_TYPE_ACCELERATOR = (1 << 3),
+ //CVCL_DEVICE_TYPE_CUSTOM = (1 << 4)
+ CVCL_DEVICE_TYPE_ALL = 0xFFFFFFFF
+ };
+
+ enum DevMemRW
+ {
+ DEVICE_MEM_R_W = 0,
+ DEVICE_MEM_R_ONLY,
+ DEVICE_MEM_W_ONLY
+ };
+
+ enum DevMemType
+ {
+ DEVICE_MEM_DEFAULT = 0,
+ DEVICE_MEM_AHP, //alloc host pointer
+ DEVICE_MEM_UHP, //use host pointer
+ DEVICE_MEM_CHP, //copy host pointer
+ DEVICE_MEM_PM //persistent memory
+ };
+
+ // these classes contain OpenCL runtime information
+
+ struct PlatformInfo;
+
+ struct DeviceInfo
+ {
+ int _id; // reserved, don't use it
+
+ DeviceType deviceType;
+ std::string deviceProfile;
+ std::string deviceVersion;
+ std::string deviceName;
+ std::string deviceVendor;
+ int deviceVendorId;
+ std::string deviceDriverVersion;
+ std::string deviceExtensions;
+
+ size_t maxWorkGroupSize;
+ std::vector<size_t> maxWorkItemSizes;
+ int maxComputeUnits;
+ size_t localMemorySize;
+ size_t maxMemAllocSize;
+
+ int deviceVersionMajor;
+ int deviceVersionMinor;
+
+ bool haveDoubleSupport;
+ bool isUnifiedMemory; // 1 means integrated GPU, otherwise this value is 0
+ bool isIntelDevice;
+
+ std::string compilationExtraOptions;
+
+ const PlatformInfo* platform;
+
+ DeviceInfo();
+ ~DeviceInfo();
+ };
+
+ struct PlatformInfo
+ {
+ int _id; // reserved, don't use it
+
+ std::string platformProfile;
+ std::string platformVersion;
+ std::string platformName;
+ std::string platformVendor;
+ std::string platformExtensons;
+
+ int platformVersionMajor;
+ int platformVersionMinor;
+
+ std::vector<const DeviceInfo*> devices;
+
+ PlatformInfo();
+ ~PlatformInfo();
+ };
+
+ //////////////////////////////// Initialization & Info ////////////////////////
+ typedef std::vector<const PlatformInfo*> PlatformsInfo;
+
+ CV_EXPORTS int getOpenCLPlatforms(PlatformsInfo& platforms);
+
+ typedef std::vector<const DeviceInfo*> DevicesInfo;
+
+ CV_EXPORTS int getOpenCLDevices(DevicesInfo& devices, int deviceType = CVCL_DEVICE_TYPE_GPU,
+ const PlatformInfo* platform = NULL);
+
+ // set device you want to use
+ CV_EXPORTS void setDevice(const DeviceInfo* info);
+
+ // Initialize from OpenCL handles directly.
+ // Argument types is (pointers): cl_platform_id*, cl_context*, cl_device_id*
+ CV_EXPORTS void initializeContext(void* pClPlatform, void* pClContext, void* pClDevice);
+
+ //////////////////////////////// Error handling ////////////////////////
+ CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);
+
+ enum FEATURE_TYPE
+ {
+ FEATURE_CL_DOUBLE = 1,
+ FEATURE_CL_UNIFIED_MEM,
+ FEATURE_CL_VER_1_2,
+ FEATURE_CL_INTEL_DEVICE
+ };
+
+ // Represents OpenCL context, interface
+ class CV_EXPORTS Context
+ {
+ protected:
+ Context() { }
+ ~Context() { }
+ public:
+ static Context* getContext();
+
+ bool supportsFeature(FEATURE_TYPE featureType) const;
+ const DeviceInfo& getDeviceInfo() const;
+
+ const void* getOpenCLContextPtr() const;
+ const void* getOpenCLCommandQueuePtr() const;
+ const void* getOpenCLDeviceIDPtr() const;
+ };
+
+ inline const void *getClContextPtr()
+ {
+ return Context::getContext()->getOpenCLContextPtr();
+ }
+
+ inline const void *getClCommandQueuePtr()
+ {
+ return Context::getContext()->getOpenCLCommandQueuePtr();
+ }
+
+ CV_EXPORTS bool supportsFeature(FEATURE_TYPE featureType);
+
+ CV_EXPORTS void finish();
+
+ enum BINARY_CACHE_MODE
+ {
+ CACHE_NONE = 0, // do not cache OpenCL binary
+ CACHE_DEBUG = 0x1 << 0, // cache OpenCL binary when built in debug mode
+ CACHE_RELEASE = 0x1 << 1, // default behavior, only cache when built in release mode
+ CACHE_ALL = CACHE_DEBUG | CACHE_RELEASE // cache opencl binary
+ };
+ //! Enable or disable OpenCL program binary caching onto local disk
+ // After a program (*.cl files in opencl/ folder) is built at runtime, we allow the
+ // compiled OpenCL program to be cached to the path automatically as "path/*.clb"
+ // binary file, which will be reused when the OpenCV executable is started again.
+ //
+ // This feature is enabled by default.
+ CV_EXPORTS void setBinaryDiskCache(int mode = CACHE_RELEASE, cv::String path = "./");
+
+ //! set where binary cache to be saved to
+ CV_EXPORTS void setBinaryPath(const char *path);
+
+ struct ProgramSource
+ {
+ const char* name;
+ const char* programStr;
+ const char* programHash;
+
+ // Cache in memory by name (should be unique). Caching on disk disabled.
+ inline ProgramSource(const char* _name, const char* _programStr)
+ : name(_name), programStr(_programStr), programHash(NULL)
+ {
+ }
+
+ // Cache in memory by name (should be unique). Caching on disk uses programHash mark.
+ inline ProgramSource(const char* _name, const char* _programStr, const char* _programHash)
+ : name(_name), programStr(_programStr), programHash(_programHash)
+ {
+ }
+ };
+
+ //! Calls OpenCL kernel. Pass globalThreads = NULL, and cleanUp = true, to finally clean-up without executing.
+ //! Deprecated, will be replaced
+ CV_EXPORTS void openCLExecuteKernelInterop(Context *clCxt,
+ const cv::ocl::ProgramSource& source, string kernelName,
+ size_t globalThreads[3], size_t localThreads[3],
+ std::vector< std::pair<size_t, const void *> > &args,
+ int channels, int depth, const char *build_options);
+
+ class CV_EXPORTS oclMatExpr;
+ //////////////////////////////// oclMat ////////////////////////////////
+ class CV_EXPORTS oclMat
+ {
+ public:
+ //! default constructor
+ oclMat();
+ //! constructs oclMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
+ oclMat(int rows, int cols, int type);
+ oclMat(Size size, int type);
+ //! constucts oclMatrix and fills it with the specified value _s.
+ oclMat(int rows, int cols, int type, const Scalar &s);
+ oclMat(Size size, int type, const Scalar &s);
+ //! copy constructor
+ oclMat(const oclMat &m);
+
+ //! constructor for oclMatrix headers pointing to user-allocated data
+ oclMat(int rows, int cols, int type, void *data, size_t step = Mat::AUTO_STEP);
+ oclMat(Size size, int type, void *data, size_t step = Mat::AUTO_STEP);
+
+ //! creates a matrix header for a part of the bigger matrix
+ oclMat(const oclMat &m, const Range &rowRange, const Range &colRange);
+ oclMat(const oclMat &m, const Rect &roi);
+
+ //! builds oclMat from Mat. Perfom blocking upload to device.
+ explicit oclMat (const Mat &m);
+
+ //! destructor - calls release()
+ ~oclMat();
+
+ //! assignment operators
+ oclMat &operator = (const oclMat &m);
+ //! assignment operator. Perfom blocking upload to device.
+ oclMat &operator = (const Mat &m);
+ oclMat &operator = (const oclMatExpr& expr);
+
+ //! pefroms blocking upload data to oclMat.
+ void upload(const cv::Mat &m);
+
+
+ //! downloads data from device to host memory. Blocking calls.
+ operator Mat() const;
+ void download(cv::Mat &m) const;
+
+ //! convert to _InputArray
+ operator _InputArray();
+
+ //! convert to _OutputArray
+ operator _OutputArray();
+
+ //! returns a new oclMatrix header for the specified row
+ oclMat row(int y) const;
+ //! returns a new oclMatrix header for the specified column
+ oclMat col(int x) const;
+ //! ... for the specified row span
+ oclMat rowRange(int startrow, int endrow) const;
+ oclMat rowRange(const Range &r) const;
+ //! ... for the specified column span
+ oclMat colRange(int startcol, int endcol) const;
+ oclMat colRange(const Range &r) const;
+
+ //! returns deep copy of the oclMatrix, i.e. the data is copied
+ oclMat clone() const;
+
+ //! copies those oclMatrix elements to "m" that are marked with non-zero mask elements.
+ // It calls m.create(this->size(), this->type()).
+ // It supports any data type
+ void copyTo( oclMat &m, const oclMat &mask = oclMat()) const;
+
+ //! converts oclMatrix to another datatype with optional scalng. See cvConvertScale.
+ void convertTo( oclMat &m, int rtype, double alpha = 1, double beta = 0 ) const;
+
+ void assignTo( oclMat &m, int type = -1 ) const;
+
+ //! sets every oclMatrix element to s
+ oclMat& operator = (const Scalar &s);
+ //! sets some of the oclMatrix elements to s, according to the mask
+ oclMat& setTo(const Scalar &s, const oclMat &mask = oclMat());
+ //! creates alternative oclMatrix header for the same data, with different
+ // number of channels and/or different number of rows. see cvReshape.
+ oclMat reshape(int cn, int rows = 0) const;
+
+ //! allocates new oclMatrix data unless the oclMatrix already has specified size and type.
+ // previous data is unreferenced if needed.
+ void create(int rows, int cols, int type);
+ void create(Size size, int type);
+
+ //! allocates new oclMatrix with specified device memory type.
+ void createEx(int rows, int cols, int type, DevMemRW rw_type, DevMemType mem_type);
+ void createEx(Size size, int type, DevMemRW rw_type, DevMemType mem_type);
+
+ //! decreases reference counter;
+ // deallocate the data when reference counter reaches 0.
+ void release();
+
+ //! swaps with other smart pointer
+ void swap(oclMat &mat);
+
+ //! locates oclMatrix header within a parent oclMatrix. See below
+ void locateROI( Size &wholeSize, Point &ofs ) const;
+ //! moves/resizes the current oclMatrix ROI inside the parent oclMatrix.
+ oclMat& adjustROI( int dtop, int dbottom, int dleft, int dright );
+ //! extracts a rectangular sub-oclMatrix
+ // (this is a generalized form of row, rowRange etc.)
+ oclMat operator()( Range rowRange, Range colRange ) const;
+ oclMat operator()( const Rect &roi ) const;
+
+ oclMat& operator+=( const oclMat& m );
+ oclMat& operator-=( const oclMat& m );
+ oclMat& operator*=( const oclMat& m );
+ oclMat& operator/=( const oclMat& m );
+
+ //! returns true if the oclMatrix data is continuous
+ // (i.e. when there are no gaps between successive rows).
+ // similar to CV_IS_oclMat_CONT(cvoclMat->type)
+ bool isContinuous() const;
+ //! returns element size in bytes,
+ // similar to CV_ELEM_SIZE(cvMat->type)
+ size_t elemSize() const;
+ //! returns the size of element channel in bytes.
+ size_t elemSize1() const;
+ //! returns element type, similar to CV_MAT_TYPE(cvMat->type)
+ int type() const;
+ //! returns element type, i.e. 8UC3 returns 8UC4 because in ocl
+ //! 3 channels element actually use 4 channel space
+ int ocltype() const;
+ //! returns element type, similar to CV_MAT_DEPTH(cvMat->type)
+ int depth() const;
+ //! returns element type, similar to CV_MAT_CN(cvMat->type)
+ int channels() const;
+ //! returns element type, return 4 for 3 channels element,
+ //! because a 3-channel element actually uses 4-channel space
+ int oclchannels() const;
+ //! returns step/elemSize1()
+ size_t step1() const;
+ //! returns oclMatrix size:
+ // width == number of columns, height == number of rows
+ Size size() const;
+ //! returns true if oclMatrix data is NULL
+ bool empty() const;
+
+ //! matrix transposition
+ oclMat t() const;
+
+ /*! includes several bit-fields:
+ - the magic signature
+ - continuity flag
+ - depth
+ - number of channels
+ */
+ int flags;
+ //! the number of rows and columns
+ int rows, cols;
+ //! a distance between successive rows in bytes; includes the gap if any
+ size_t step;
+ //! pointer to the data(OCL memory object)
+ uchar *data;
+
+ //! pointer to the reference counter;
+ // when oclMatrix points to user-allocated data, the pointer is NULL
+ int *refcount;
+
+ //! helper fields used in locateROI and adjustROI
+ //datastart and dataend are not used in current version
+ uchar *datastart;
+ uchar *dataend;
+
+ //! OpenCL context associated with the oclMat object.
+ Context *clCxt; // TODO clCtx
+ //add offset for handle ROI, calculated in byte
+ int offset;
+ //add wholerows and wholecols for the whole matrix, datastart and dataend are no longer used
+ int wholerows;
+ int wholecols;
+ };
+
+ // convert InputArray/OutputArray to oclMat references
+ CV_EXPORTS oclMat& getOclMatRef(InputArray src);
+ CV_EXPORTS oclMat& getOclMatRef(OutputArray src);
+
+ ///////////////////// mat split and merge /////////////////////////////////
+ //! Compose a multi-channel array from several single-channel arrays
+ // Support all types
+ CV_EXPORTS void merge(const oclMat *src, size_t n, oclMat &dst);
+ CV_EXPORTS void merge(const vector<oclMat> &src, oclMat &dst);
+
+ //! Divides multi-channel array into several single-channel arrays
+ // Support all types
+ CV_EXPORTS void split(const oclMat &src, oclMat *dst);
+ CV_EXPORTS void split(const oclMat &src, vector<oclMat> &dst);
+
+ ////////////////////////////// Arithmetics ///////////////////////////////////
+
+ //! adds one matrix to another with scale (dst = src1 * alpha + src2 * beta + gama)
+ // supports all data types
+ CV_EXPORTS void addWeighted(const oclMat &src1, double alpha, const oclMat &src2, double beta, double gama, oclMat &dst);
+
+ //! adds one matrix to another (dst = src1 + src2)
+ // supports all data types
+ CV_EXPORTS void add(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ //! adds scalar to a matrix (dst = src1 + s)
+ // supports all data types
+ CV_EXPORTS void add(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! subtracts one matrix from another (dst = src1 - src2)
+ // supports all data types
+ CV_EXPORTS void subtract(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ //! subtracts scalar from a matrix (dst = src1 - s)
+ // supports all data types
+ CV_EXPORTS void subtract(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! computes element-wise product of the two arrays (dst = src1 * scale * src2)
+ // supports all data types
+ CV_EXPORTS void multiply(const oclMat &src1, const oclMat &src2, oclMat &dst, double scale = 1);
+ //! multiplies matrix to a number (dst = scalar * src)
+ // supports all data types
+ CV_EXPORTS void multiply(double scalar, const oclMat &src, oclMat &dst);
+
+ //! computes element-wise quotient of the two arrays (dst = src1 * scale / src2)
+ // supports all data types
+ CV_EXPORTS void divide(const oclMat &src1, const oclMat &src2, oclMat &dst, double scale = 1);
+ //! computes element-wise quotient of the two arrays (dst = scale / src)
+ // supports all data types
+ CV_EXPORTS void divide(double scale, const oclMat &src1, oclMat &dst);
+
+ //! computes element-wise minimum of the two arrays (dst = min(src1, src2))
+ // supports all data types
+ CV_EXPORTS void min(const oclMat &src1, const oclMat &src2, oclMat &dst);
+
+ //! computes element-wise maximum of the two arrays (dst = max(src1, src2))
+ // supports all data types
+ CV_EXPORTS void max(const oclMat &src1, const oclMat &src2, oclMat &dst);
+
+ //! compares elements of two arrays (dst = src1 \verbatim<cmpop>\endverbatim src2)
+ // supports all data types
+ CV_EXPORTS void compare(const oclMat &src1, const oclMat &src2, oclMat &dst, int cmpop);
+
+ //! transposes the matrix
+ // supports all data types
+ CV_EXPORTS void transpose(const oclMat &src, oclMat &dst);
+
+ //! computes element-wise absolute values of an array (dst = abs(src))
+ // supports all data types
+ CV_EXPORTS void abs(const oclMat &src, oclMat &dst);
+
+ //! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2))
+ // supports all data types
+ CV_EXPORTS void absdiff(const oclMat &src1, const oclMat &src2, oclMat &dst);
+ //! computes element-wise absolute difference of array and scalar (dst = abs(src1 - s))
+ // supports all data types
+ CV_EXPORTS void absdiff(const oclMat &src1, const Scalar &s, oclMat &dst);
+
+ //! computes mean value and standard deviation of all or selected array elements
+ // supports all data types
+ CV_EXPORTS void meanStdDev(const oclMat &mtx, Scalar &mean, Scalar &stddev);
+
+ //! computes norm of array
+ // supports NORM_INF, NORM_L1, NORM_L2
+ // supports all data types
+ CV_EXPORTS double norm(const oclMat &src1, int normType = NORM_L2);
+
+ //! computes norm of the difference between two arrays
+ // supports NORM_INF, NORM_L1, NORM_L2
+ // supports all data types
+ CV_EXPORTS double norm(const oclMat &src1, const oclMat &src2, int normType = NORM_L2);
+
+ //! reverses the order of the rows, columns or both in a matrix
+ // supports all types
+ CV_EXPORTS void flip(const oclMat &src, oclMat &dst, int flipCode);
+
+ //! computes sum of array elements
+ // support all types
+ CV_EXPORTS Scalar sum(const oclMat &m);
+ CV_EXPORTS Scalar absSum(const oclMat &m);
+ CV_EXPORTS Scalar sqrSum(const oclMat &m);
+
+ //! finds global minimum and maximum array elements and returns their values
+ // support all C1 types
+ CV_EXPORTS void minMax(const oclMat &src, double *minVal, double *maxVal = 0, const oclMat &mask = oclMat());
+
+ //! finds global minimum and maximum array elements and returns their values with locations
+ // support all C1 types
+ CV_EXPORTS void minMaxLoc(const oclMat &src, double *minVal, double *maxVal = 0, Point *minLoc = 0, Point *maxLoc = 0,
+ const oclMat &mask = oclMat());
+
+ //! counts non-zero array elements
+ // support all types
+ CV_EXPORTS int countNonZero(const oclMat &src);
+
+ //! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i))
+ // destination array will have the depth type as lut and the same channels number as source
+ //It supports 8UC1 8UC4 only
+ CV_EXPORTS void LUT(const oclMat &src, const oclMat &lut, oclMat &dst);
+
+ //! only 8UC1 and 256 bins is supported now
+ CV_EXPORTS void calcHist(const oclMat &mat_src, oclMat &mat_hist);
+ //! only 8UC1 and 256 bins is supported now
+ CV_EXPORTS void equalizeHist(const oclMat &mat_src, oclMat &mat_dst);
+
+ //! only 8UC1 is supported now
+ CV_EXPORTS Ptr<cv::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
+
+ //! bilateralFilter
+ // supports 8UC1 8UC4
+ CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT);
+
+ //! Applies an adaptive bilateral filter to the input image
+ // Unlike the usual bilateral filter that uses fixed value for sigmaColor,
+ // the adaptive version calculates the local variance in the ksize neighborhood
+ // and use this as sigmaColor, for the value filtering. However, the local standard deviation is
+ // clamped to the maxSigmaColor.
+ // supports 8UC1, 8UC3
+ CV_EXPORTS void adaptiveBilateralFilter(const oclMat& src, oclMat& dst, Size ksize, double sigmaSpace, double maxSigmaColor=20.0, Point anchor = Point(-1, -1), int borderType=BORDER_DEFAULT);
+
+ //! computes exponent of each matrix element (dst = e**src)
+ // supports only CV_32FC1, CV_64FC1 type
+ CV_EXPORTS void exp(const oclMat &src, oclMat &dst);
+
+ //! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src))
+ // supports only CV_32FC1, CV_64FC1 type
+ CV_EXPORTS void log(const oclMat &src, oclMat &dst);
+
+ //! computes magnitude of each (x(i), y(i)) vector
+ // supports only CV_32F, CV_64F type
+ CV_EXPORTS void magnitude(const oclMat &x, const oclMat &y, oclMat &magnitude);
+
+ //! computes angle (angle(i)) of each (x(i), y(i)) vector
+ // supports only CV_32F, CV_64F type
+ CV_EXPORTS void phase(const oclMat &x, const oclMat &y, oclMat &angle, bool angleInDegrees = false);
+
+ //! the function raises every element of the input array to p
+ // support only CV_32F, CV_64F type
+ CV_EXPORTS void pow(const oclMat &x, double p, oclMat &y);
+
+ //! converts Cartesian coordinates to polar
+ // supports only CV_32F CV_64F type
+ CV_EXPORTS void cartToPolar(const oclMat &x, const oclMat &y, oclMat &magnitude, oclMat &angle, bool angleInDegrees = false);
+
+ //! converts polar coordinates to Cartesian
+ // supports only CV_32F CV_64F type
+ CV_EXPORTS void polarToCart(const oclMat &magnitude, const oclMat &angle, oclMat &x, oclMat &y, bool angleInDegrees = false);
+
+ //! performs per-element bit-wise inversion
+ // supports all types
+ CV_EXPORTS void bitwise_not(const oclMat &src, oclMat &dst);
+
+ //! calculates per-element bit-wise disjunction of two arrays
+ // supports all types
+ CV_EXPORTS void bitwise_or(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ CV_EXPORTS void bitwise_or(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! calculates per-element bit-wise conjunction of two arrays
+ // supports all types
+ CV_EXPORTS void bitwise_and(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ CV_EXPORTS void bitwise_and(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! calculates per-element bit-wise "exclusive or" operation
+ // supports all types
+ CV_EXPORTS void bitwise_xor(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ CV_EXPORTS void bitwise_xor(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! Logical operators
+ CV_EXPORTS oclMat operator ~ (const oclMat &);
+ CV_EXPORTS oclMat operator | (const oclMat &, const oclMat &);
+ CV_EXPORTS oclMat operator & (const oclMat &, const oclMat &);
+ CV_EXPORTS oclMat operator ^ (const oclMat &, const oclMat &);
+
+
+ //! Mathematics operators
+ CV_EXPORTS oclMatExpr operator + (const oclMat &src1, const oclMat &src2);
+ CV_EXPORTS oclMatExpr operator - (const oclMat &src1, const oclMat &src2);
+ CV_EXPORTS oclMatExpr operator * (const oclMat &src1, const oclMat &src2);
+ CV_EXPORTS oclMatExpr operator / (const oclMat &src1, const oclMat &src2);
+
+ //! computes convolution of two images
+ // support only CV_32FC1 type
+ CV_EXPORTS void convolve(const oclMat &image, const oclMat &temp1, oclMat &result);
+
+ CV_EXPORTS void cvtColor(const oclMat &src, oclMat &dst, int code, int dcn = 0);
+
+ //! initializes a scaled identity matrix
+ CV_EXPORTS void setIdentity(oclMat& src, const Scalar & val = Scalar(1));
+
+ //! fills the output array with repeated copies of the input array
+ CV_EXPORTS void repeat(const oclMat & src, int ny, int nx, oclMat & dst);
+
+ //////////////////////////////// Filter Engine ////////////////////////////////
+
+ /*!
+ The Base Class for 1D or Row-wise Filters
+
+ This is the base class for linear or non-linear filters that process 1D data.
+ In particular, such filters are used for the "horizontal" filtering parts in separable filters.
+ */
+ class CV_EXPORTS BaseRowFilter_GPU
+ {
+ public:
+ //! stores the aperture size, anchor position and border handling mode in the public fields below
+ BaseRowFilter_GPU(int ksize_, int anchor_, int bordertype_) : ksize(ksize_), anchor(anchor_), bordertype(bordertype_) {}
+ virtual ~BaseRowFilter_GPU() {}
+ //! runs one horizontal filtering pass over src, writing into dst; implemented by derived classes
+ virtual void operator()(const oclMat &src, oclMat &dst) = 0;
+ //! aperture size, anchor position and border type, exactly as passed to the constructor
+ int ksize, anchor, bordertype;
+ };
+
+ /*!
+ The Base Class for Column-wise Filters
+
+ This is the base class for linear or non-linear filters that process columns of 2D arrays.
+ Such filters are used for the "vertical" filtering parts in separable filters.
+ */
+ class CV_EXPORTS BaseColumnFilter_GPU
+ {
+ public:
+ //! stores the aperture size, anchor position and border handling mode in the public fields below
+ BaseColumnFilter_GPU(int ksize_, int anchor_, int bordertype_) : ksize(ksize_), anchor(anchor_), bordertype(bordertype_) {}
+ virtual ~BaseColumnFilter_GPU() {}
+ //! runs one vertical filtering pass over src, writing into dst; implemented by derived classes
+ virtual void operator()(const oclMat &src, oclMat &dst) = 0;
+ //! aperture size, anchor position and border type, exactly as passed to the constructor
+ int ksize, anchor, bordertype;
+ };
+
+ /*!
+ The Base Class for Non-Separable 2D Filters.
+
+ This is the base class for linear or non-linear 2D filters.
+ */
+ class CV_EXPORTS BaseFilter_GPU
+ {
+ public:
+ //! stores the 2D kernel size, anchor point and border handling mode in the public fields below
+ BaseFilter_GPU(const Size &ksize_, const Point &anchor_, const int &borderType_)
+ : ksize(ksize_), anchor(anchor_), borderType(borderType_) {}
+ virtual ~BaseFilter_GPU() {}
+ //! applies the (non-separable) 2D filter to src, writing into dst; implemented by derived classes
+ virtual void operator()(const oclMat &src, oclMat &dst) = 0;
+ //! kernel size, anchor point and border type, exactly as passed to the constructor
+ Size ksize;
+ Point anchor;
+ int borderType;
+ };
+
+ /*!
+ The Base Class for Filter Engine.
+
+ The class can be used to apply an arbitrary filtering operation to an image.
+ It contains all the necessary intermediate buffers.
+ */
+ class CV_EXPORTS FilterEngine_GPU
+ {
+ public:
+ virtual ~FilterEngine_GPU() {}
+
+ //! applies the configured filtering operation to src, writing the result into dst;
+ // NOTE(review): the default roi of Rect(0, 0, -1, -1) presumably means "process the
+ // whole image" -- confirm against the concrete engine implementations
+ virtual void apply(const oclMat &src, oclMat &dst, Rect roi = Rect(0, 0, -1, -1)) = 0;
+ };
+
+ //! returns the non-separable filter engine with the specified filter
+ CV_EXPORTS Ptr<FilterEngine_GPU> createFilter2D_GPU(const Ptr<BaseFilter_GPU> filter2D);
+
+ //! returns the primitive row filter with the specified kernel
+ CV_EXPORTS Ptr<BaseRowFilter_GPU> getLinearRowFilter_GPU(int srcType, int bufType, const Mat &rowKernel,
+ int anchor = -1, int bordertype = BORDER_DEFAULT);
+
+ //! returns the primitive column filter with the specified kernel
+ CV_EXPORTS Ptr<BaseColumnFilter_GPU> getLinearColumnFilter_GPU(int bufType, int dstType, const Mat &columnKernel,
+ int anchor = -1, int bordertype = BORDER_DEFAULT, double delta = 0.0);
+
+ //! returns the separable linear filter engine
+ CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat &rowKernel,
+ const Mat &columnKernel, const Point &anchor = Point(-1, -1), double delta = 0.0, int bordertype = BORDER_DEFAULT, Size imgSize = Size(-1,-1));
+
+ //! returns the separable filter engine with the specified filters
+ CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU> &rowFilter,
+ const Ptr<BaseColumnFilter_GPU> &columnFilter);
+
+ //! returns the Gaussian filter engine
+ CV_EXPORTS Ptr<FilterEngine_GPU> createGaussianFilter_GPU(int type, Size ksize, double sigma1, double sigma2 = 0, int bordertype = BORDER_DEFAULT, Size imgSize = Size(-1,-1));
+
+ //! returns filter engine for the generalized Sobel operator
+ CV_EXPORTS Ptr<FilterEngine_GPU> createDerivFilter_GPU( int srcType, int dstType, int dx, int dy, int ksize, int borderType = BORDER_DEFAULT, Size imgSize = Size(-1,-1) );
+
+ //! applies Laplacian operator to the image
+ // supports only ksize = 1 and ksize = 3
+ CV_EXPORTS void Laplacian(const oclMat &src, oclMat &dst, int ddepth, int ksize = 1, double scale = 1,
+ double delta=0, int borderType=BORDER_DEFAULT);
+
+ //! returns 2D box filter
+ // dst type must be the same as source type
+ CV_EXPORTS Ptr<BaseFilter_GPU> getBoxFilter_GPU(int srcType, int dstType,
+ const Size &ksize, Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns box filter engine
+ CV_EXPORTS Ptr<FilterEngine_GPU> createBoxFilter_GPU(int srcType, int dstType, const Size &ksize,
+ const Point &anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns 2D filter with the specified kernel
+ // supports: dst type must be the same as source type
+ CV_EXPORTS Ptr<BaseFilter_GPU> getLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, const Size &ksize,
+ const Point &anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns the non-separable linear filter engine
+ // supports: dst type must be the same as source type
+ CV_EXPORTS Ptr<FilterEngine_GPU> createLinearFilter_GPU(int srcType, int dstType, const Mat &kernel,
+ const Point &anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! smooths the image using the normalized box filter
+ CV_EXPORTS void boxFilter(const oclMat &src, oclMat &dst, int ddepth, Size ksize,
+ Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns 2D morphological filter
+ //! only MORPH_ERODE and MORPH_DILATE are supported
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ // kernel must have CV_8UC1 type, one rows and cols == ksize.width * ksize.height
+ CV_EXPORTS Ptr<BaseFilter_GPU> getMorphologyFilter_GPU(int op, int type, const Mat &kernel, const Size &ksize,
+ Point anchor = Point(-1, -1));
+
+ //! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
+ CV_EXPORTS Ptr<FilterEngine_GPU> createMorphologyFilter_GPU(int op, int type, const Mat &kernel,
+ const Point &anchor = Point(-1, -1), int iterations = 1);
+
+ //! a synonym for normalized box filter
+ static inline void blur(const oclMat &src, oclMat &dst, Size ksize, Point anchor = Point(-1, -1),
+ int borderType = BORDER_CONSTANT)
+ {
+ boxFilter(src, dst, -1, ksize, anchor, borderType);
+ }
+
+ //! applies non-separable 2D linear filter to the image
+ CV_EXPORTS void filter2D(const oclMat &src, oclMat &dst, int ddepth, const Mat &kernel,
+ Point anchor = Point(-1, -1), double delta = 0.0, int borderType = BORDER_DEFAULT);
+
+ //! applies separable 2D linear filter to the image
+ CV_EXPORTS void sepFilter2D(const oclMat &src, oclMat &dst, int ddepth, const Mat &kernelX, const Mat &kernelY,
+ Point anchor = Point(-1, -1), double delta = 0.0, int bordertype = BORDER_DEFAULT);
+
+ //! applies generalized Sobel operator to the image
+ // dst.type must equal src.type
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101
+ CV_EXPORTS void Sobel(const oclMat &src, oclMat &dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0.0, int bordertype = BORDER_DEFAULT);
+
+ //! applies the vertical or horizontal Scharr operator to the image
+ // dst.type must equal src.type
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101
+ CV_EXPORTS void Scharr(const oclMat &src, oclMat &dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0.0, int bordertype = BORDER_DEFAULT);
+
+ //! smooths the image using Gaussian filter.
+ // dst.type must equal src.type
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101
+ CV_EXPORTS void GaussianBlur(const oclMat &src, oclMat &dst, Size ksize, double sigma1, double sigma2 = 0, int bordertype = BORDER_DEFAULT);
+
+ //! erodes the image (applies the local minimum operator)
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ CV_EXPORTS void erode( const oclMat &src, oclMat &dst, const Mat &kernel, Point anchor = Point(-1, -1), int iterations = 1,
+
+ int borderType = BORDER_CONSTANT, const Scalar &borderValue = morphologyDefaultBorderValue());
+
+
+ //! dilates the image (applies the local maximum operator)
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ CV_EXPORTS void dilate( const oclMat &src, oclMat &dst, const Mat &kernel, Point anchor = Point(-1, -1), int iterations = 1,
+
+ int borderType = BORDER_CONSTANT, const Scalar &borderValue = morphologyDefaultBorderValue());
+
+
+ //! applies an advanced morphological operation to the image
+ CV_EXPORTS void morphologyEx( const oclMat &src, oclMat &dst, int op, const Mat &kernel, Point anchor = Point(-1, -1), int iterations = 1,
+
+ int borderType = BORDER_CONSTANT, const Scalar &borderValue = morphologyDefaultBorderValue());
+
+
+ ////////////////////////////// Image processing //////////////////////////////
+ //! Does mean shift filtering on GPU.
+ CV_EXPORTS void meanShiftFiltering(const oclMat &src, oclMat &dst, int sp, int sr,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
+
+ //! Does mean shift procedure on GPU.
+ CV_EXPORTS void meanShiftProc(const oclMat &src, oclMat &dstr, oclMat &dstsp, int sp, int sr,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
+
+ //! Does mean shift segmentation with elimination of small regions.
+ CV_EXPORTS void meanShiftSegmentation(const oclMat &src, Mat &dst, int sp, int sr, int minsize,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
+
+ //! applies fixed threshold to the image.
+ // supports CV_8UC1 and CV_32FC1 data type
+ // supports threshold type: THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV
+ CV_EXPORTS double threshold(const oclMat &src, oclMat &dst, double thresh, double maxVal, int type = THRESH_TRUNC);
+
+ //! resizes the image
+ // Supports INTER_NEAREST, INTER_LINEAR
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ CV_EXPORTS void resize(const oclMat &src, oclMat &dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR);
+
+ //! Applies a generic geometrical transformation to an image.
+
+ // Supports INTER_NEAREST, INTER_LINEAR.
+ // Map1 supports CV_16SC2, CV_32FC2 types.
+ // Src supports CV_8UC1, CV_8UC2, CV_8UC4.
+ CV_EXPORTS void remap(const oclMat &src, oclMat &dst, oclMat &map1, oclMat &map2, int interpolation, int bordertype, const Scalar &value = Scalar());
+
+ //! copies 2D array to a larger destination array and pads borders with user-specifiable constant
+ // supports CV_8UC1, CV_8UC4, CV_32SC1 types
+ CV_EXPORTS void copyMakeBorder(const oclMat &src, oclMat &dst, int top, int bottom, int left, int right, int boardtype, const Scalar &value = Scalar());
+
+ //! Smoothes image using median filter
+ // The source 1- or 4-channel image. m should be 3 or 5, the image depth should be CV_8U or CV_32F.
+ CV_EXPORTS void medianFilter(const oclMat &src, oclMat &dst, int m);
+
+ //! warps the image using affine transformation
+ // Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ CV_EXPORTS void warpAffine(const oclMat &src, oclMat &dst, const Mat &M, Size dsize, int flags = INTER_LINEAR);
+
+ //! warps the image using perspective transformation
+ // Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ CV_EXPORTS void warpPerspective(const oclMat &src, oclMat &dst, const Mat &M, Size dsize, int flags = INTER_LINEAR);
+
+ //! computes the integral image and integral for the squared image
+ // sum will have CV_32S type, sqsum - CV32F type
+ // supports only CV_8UC1 source type
+ CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum);
+ CV_EXPORTS void integral(const oclMat &src, oclMat &sum);
+ CV_EXPORTS void cornerHarris(const oclMat &src, oclMat &dst, int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
+ CV_EXPORTS void cornerHarris_dxdy(const oclMat &src, oclMat &dst, oclMat &Dx, oclMat &Dy,
+ int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
+ CV_EXPORTS void cornerMinEigenVal(const oclMat &src, oclMat &dst, int blockSize, int ksize, int bordertype = cv::BORDER_DEFAULT);
+ CV_EXPORTS void cornerMinEigenVal_dxdy(const oclMat &src, oclMat &dst, oclMat &Dx, oclMat &Dy,
+ int blockSize, int ksize, int bordertype = cv::BORDER_DEFAULT);
+ /////////////////////////////////// ML ///////////////////////////////////////////
+
+ //! Computes the closest center for each line (row) in source and labels it with that center's index
+ // supports CV_32FC1/CV_32FC2/CV_32FC4 data type
+ // supports NORM_L1 and NORM_L2 distType
+ // if indices is provided, only the indexed rows will be calculated and their results are in the same
+ // order of indices
+ CV_EXPORTS void distanceToCenters(const oclMat &src, const oclMat &centers, Mat &dists, Mat &labels, int distType = NORM_L2SQR);
+
+ //!Does k-means procedure on GPU
+ // supports CV_32FC1/CV_32FC2/CV_32FC4 data type
+ CV_EXPORTS double kmeans(const oclMat &src, int K, oclMat &bestLabels,
+ TermCriteria criteria, int attemps, int flags, oclMat &centers);
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////CascadeClassifier//////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ //! OpenCL-accelerated Haar cascade detector; derives the cascade loading/parsing
+ // machinery from cv::CascadeClassifier and adds oclMat-based detection entry points.
+ class CV_EXPORTS_W OclCascadeClassifier : public cv::CascadeClassifier
+ {
+ public:
+ OclCascadeClassifier() {};
+ ~OclCascadeClassifier() {};
+
+ //! legacy C-style detection entry point; results are allocated from the
+ // caller-supplied storage (presumably a sequence of detected rectangles --
+ // confirm against the implementation)
+ CvSeq* oclHaarDetectObjects(oclMat &gimg, CvMemStorage *storage, double scaleFactor,
+ int minNeighbors, int flags, CvSize minSize = cvSize(0, 0), CvSize maxSize = cvSize(0, 0));
+ //! C++-style detection entry point; fills 'faces' with the detected object rectangles
+ void detectMultiScale(oclMat &image, CV_OUT std::vector<cv::Rect>& faces,
+ double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0,
+ Size minSize = Size(), Size maxSize = Size());
+ };
+
+ //! Buffered variant of the OpenCL Haar cascade detector: keeps device buffers and
+ // per-scale state between detectMultiScale() calls so repeated detection on
+ // same-sized frames avoids reallocation. Call release() to free the buffers.
+ class CV_EXPORTS OclCascadeClassifierBuf : public cv::CascadeClassifier
+ {
+ public:
+ OclCascadeClassifierBuf() :
+ m_flags(0), initialized(false), m_scaleFactor(0), buffers(NULL) {}
+
+ //! releases all cached buffers on destruction
+ ~OclCascadeClassifierBuf() { release(); }
+
+ //! detection entry point; fills 'faces' with the detected object rectangles
+ void detectMultiScale(oclMat &image, CV_OUT std::vector<cv::Rect>& faces,
+ double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0,
+ Size minSize = Size(), Size maxSize = Size());
+ //! frees the cached device/host buffers; safe to call more than once (invoked by the destructor)
+ void release();
+
+ private:
+ //! one-time setup of the internal buffers for the given image geometry and parameters
+ void Init(const int rows, const int cols, double scaleFactor, int flags,
+ const int outputsz, const size_t localThreads[],
+ CvSize minSize, CvSize maxSize);
+ //! allocates the buffers whose size depends only on the cascade data, not the scale factor
+ void CreateBaseBufs(const int datasize, const int totalclassifier, const int flags, const int outputsz);
+ //! allocates/resizes the buffers whose size depends on the image size and scale factor
+ void CreateFactorRelatedBufs(const int rows, const int cols, const int flags,
+ const double scaleFactor, const size_t localThreads[],
+ CvSize minSize, CvSize maxSize);
+ //! converts the raw candidate rectangles and weights into the final 'faces' output
+ void GenResult(CV_OUT std::vector<cv::Rect>& faces, const std::vector<cv::Rect> &rectList, const std::vector<int> &rweights);
+
+ //! cached image geometry and detection parameters from the last Init()
+ int m_rows;
+ int m_cols;
+ int m_flags;
+ int m_loopcount;
+ int m_nodenum;
+ bool findBiggestObject;
+ //! true once Init() has set up the buffers for the current geometry/parameters
+ bool initialized;
+ double m_scaleFactor;
+ Size m_minSize;
+ Size m_maxSize;
+ //! per-scale window sizes and scale factors computed during initialization
+ vector<CvSize> sizev;
+ vector<float> scalev;
+ //! cached device images: input and integral/squared-integral buffers
+ oclMat gimg1, gsum, gsqsum;
+ //! opaque bundle of additional device buffers, allocated in Init(), freed in release()
+ void * buffers;
+ };
+
+
+ /////////////////////////////// Pyramid /////////////////////////////////////
+ CV_EXPORTS void pyrDown(const oclMat &src, oclMat &dst);
+
+ //! upsamples the source image and then smoothes it
+ CV_EXPORTS void pyrUp(const oclMat &src, oclMat &dst);
+
+ //! performs linear blending of two images
+ //! to avoid accuracy errors the sum of weights shouldn't be very close to zero
+ // supports only CV_8UC1 source type
+ CV_EXPORTS void blendLinear(const oclMat &img1, const oclMat &img2, const oclMat &weights1, const oclMat &weights2, oclMat &result);
+
+ //! computes vertical sum, supports only CV_32FC1 images
+ CV_EXPORTS void columnSum(const oclMat &src, oclMat &sum);
+
+ ///////////////////////////////////////// match_template /////////////////////////////////////////////////////////////
+ //! Reusable scratch buffers for matchTemplate(); pass the same instance to repeated
+ // calls (see the buffered matchTemplate overload below) to avoid reallocating
+ // intermediate device images between invocations.
+ struct CV_EXPORTS MatchTemplateBuf
+ {
+ //! user-tunable processing block size (Size() presumably means "let the implementation choose" -- confirm)
+ Size user_block_size;
+ //! float-converted copies of the image and template
+ oclMat imagef, templf;
+ //! per-channel intermediate images and their (squared) integral sums
+ std::vector<oclMat> images;
+ std::vector<oclMat> image_sums;
+ std::vector<oclMat> image_sqsums;
+ };
+
+ //! computes the proximity map for the raster template and the image where the template is searched for
+ // Supports TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED for type 8UC1 and 8UC4
+ // Supports TM_SQDIFF, TM_CCORR for type 32FC1 and 32FC4
+ CV_EXPORTS void matchTemplate(const oclMat &image, const oclMat &templ, oclMat &result, int method);
+
+ //! computes the proximity map for the raster template and the image where the template is searched for
+ // Supports TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED for type 8UC1 and 8UC4
+ // Supports TM_SQDIFF, TM_CCORR for type 32FC1 and 32FC4
+ CV_EXPORTS void matchTemplate(const oclMat &image, const oclMat &templ, oclMat &result, int method, MatchTemplateBuf &buf);
+
+ ///////////////////////////////////////////// Canny /////////////////////////////////////////////
+ struct CV_EXPORTS CannyBuf;
+ //! compute edges of the input image using Canny operator
+ // Support CV_8UC1 only
+ CV_EXPORTS void Canny(const oclMat &image, oclMat &edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
+ CV_EXPORTS void Canny(const oclMat &image, CannyBuf &buf, oclMat &edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
+ CV_EXPORTS void Canny(const oclMat &dx, const oclMat &dy, oclMat &edges, double low_thresh, double high_thresh, bool L2gradient = false);
+ CV_EXPORTS void Canny(const oclMat &dx, const oclMat &dy, CannyBuf &buf, oclMat &edges, double low_thresh, double high_thresh, bool L2gradient = false);
+
+ //! Reusable scratch buffers for the Canny() overloads above; create()/release()
+ // manage the device allocations, and the destructor releases them automatically.
+ struct CV_EXPORTS CannyBuf
+ {
+ //! 'counter' is a 1x1 CV_32S device scalar (shared bookkeeping for the edge-tracking kernels)
+ CannyBuf() : counter(1, 1, CV_32S) { }
+ ~CannyBuf()
+ {
+ release();
+ }
+ //! convenience constructor: immediately sizes the buffers for the given image and aperture
+ explicit CannyBuf(const Size &image_size, int apperture_size = 3) : counter(1, 1, CV_32S)
+ {
+ create(image_size, apperture_size);
+ }
+ //! constructs the buffers from precomputed derivative images (matches the dx/dy Canny overloads)
+ CannyBuf(const oclMat &dx_, const oclMat &dy_);
+
+ //! allocates (or re-allocates) the buffers for the given image size and Sobel aperture
+ void create(const Size &image_size, int apperture_size = 3);
+ //! frees the device buffers; called by the destructor
+ void release();
+ //! x/y derivative images and their intermediate computation buffers
+ oclMat dx, dy;
+ oclMat dx_buf, dy_buf;
+ //! edge map working buffer
+ oclMat edgeBuf;
+ //! candidate-pixel queues used during hysteresis edge tracking
+ oclMat trackBuf1, trackBuf2;
+ oclMat counter;
+ //! separable Sobel filter engines used to compute dx/dy when they are not supplied
+ Ptr<FilterEngine_GPU> filterDX, filterDY;
+ };
+
+ ///////////////////////////////////////// clAmdFft related /////////////////////////////////////////
+ //! Performs a forward or inverse discrete Fourier transform (1D or 2D) of floating point matrix.
+ //! Param dft_size is the size of DFT transform.
+ //!
+ //! For complex-to-real transform it is assumed that the source matrix is packed in CLFFT's format.
+ // support src type of CV32FC1, CV32FC2
+ // support flags: DFT_INVERSE, DFT_REAL_OUTPUT, DFT_COMPLEX_OUTPUT, DFT_ROWS
+ // dft_size is the size of original input, which is used for transformation from complex to real.
+ // dft_size must be powers of 2, 3 and 5
+ // real to complex dft requires at least v1.8 clAmdFft
+ // real to complex dft output is not the same with cpu version
+ // real to complex and complex to real does not support DFT_ROWS
+ CV_EXPORTS void dft(const oclMat &src, oclMat &dst, Size dft_size = Size(), int flags = 0);
+
+ //! implements generalized matrix product algorithm GEMM from BLAS
+ // The functionality requires clAmdBlas library
+ // only support type CV_32FC1
+ // flag GEMM_3_T is not supported
+ CV_EXPORTS void gemm(const oclMat &src1, const oclMat &src2, double alpha,
+ const oclMat &src3, double beta, oclMat &dst, int flags = 0);
+
+ //////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
+ struct CV_EXPORTS HOGDescriptor
+ {
+ enum { DEFAULT_WIN_SIGMA = -1 };
+ enum { DEFAULT_NLEVELS = 64 };
+ enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL };
+ HOGDescriptor(Size win_size = Size(64, 128), Size block_size = Size(16, 16),
+ Size block_stride = Size(8, 8), Size cell_size = Size(8, 8),
+ int nbins = 9, double win_sigma = DEFAULT_WIN_SIGMA,
+ double threshold_L2hys = 0.2, bool gamma_correction = true,
+ int nlevels = DEFAULT_NLEVELS);
+
+ size_t getDescriptorSize() const;
+ size_t getBlockHistogramSize() const;
+ void setSVMDetector(const vector<float> &detector);
+ static vector<float> getDefaultPeopleDetector();
+ static vector<float> getPeopleDetector48x96();
+ static vector<float> getPeopleDetector64x128();
+ // detect objects at a single scale; found_locations receives window positions
+ void detect(const oclMat &img, vector<Point> &found_locations,
+ double hit_threshold = 0, Size win_stride = Size(),
+ Size padding = Size());
+ // detect objects over an image pyramid; found_locations receives bounding rectangles
+ void detectMultiScale(const oclMat &img, vector<Rect> &found_locations,
+ double hit_threshold = 0, Size win_stride = Size(),
+ Size padding = Size(), double scale0 = 1.05,
+ int group_threshold = 2);
+ void getDescriptors(const oclMat &img, Size win_stride,
+ oclMat &descriptors,
+ int descr_format = DESCR_FORMAT_COL_BY_COL);
+ Size win_size;
+ Size block_size;
+ Size block_stride;
+ Size cell_size;
+
+ int nbins;
+ double win_sigma;
+ double threshold_L2hys;
+ bool gamma_correction;
+ int nlevels;
+
+ protected:
+ // initialize buffers; only need to do once in case of multiscale detection
+ void init_buffer(const oclMat &img, Size win_stride);
+ void computeBlockHistograms(const oclMat &img);
+ void computeGradient(const oclMat &img, oclMat &grad, oclMat &qangle);
+ double getWinSigma() const;
+ bool checkDetectorSize() const;
+
+ static int numPartsWithin(int size, int part_size, int stride);
+ static Size numPartsWithin(Size size, Size part_size, Size stride);
+
+ // Coefficients of the separating plane
+ float free_coef;
+ oclMat detector;
+ // Results of the last classification step
+ oclMat labels;
+ Mat labels_host;
+ // Results of the last histogram evaluation step
+ oclMat block_hists;
+ // Gradients computation results
+ oclMat grad, qangle;
+ // scaled image
+ oclMat image_scale;
+ // effect size of input image (might be different from original size after scaling)
+ Size effect_size;
+
+ private:
+ oclMat gauss_w_lut;
+ };
+
+
+ ////////////////////////feature2d_ocl/////////////////
+ /****************************************************************************************\
+ * Distance *
+ \****************************************************************************************/
+ // Maps an element type to the type used to accumulate distances;
+ // small integer element types accumulate in float (see specializations below).
+ template<typename T>
+ struct CV_EXPORTS Accumulator
+ {
+ typedef T Type;
+ };
+ template<> struct Accumulator<unsigned char>
+ {
+ typedef float Type;
+ };
+ template<> struct Accumulator<unsigned short>
+ {
+ typedef float Type;
+ };
+ template<> struct Accumulator<char>
+ {
+ typedef float Type;
+ };
+ template<> struct Accumulator<short>
+ {
+ typedef float Type;
+ };
+
+ /*
+ * Manhattan distance (city block distance) functor
+ */
+ template<class T>
+ struct CV_EXPORTS L1
+ {
+ enum { normType = NORM_L1 };
+ typedef T ValueType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ // sum of absolute differences over `size` elements (delegates to normL1)
+ ResultType operator()( const T *a, const T *b, int size ) const
+ {
+ return normL1<ValueType, ResultType>(a, b, size);
+ }
+ };
+
+ /*
+ * Euclidean distance functor
+ */
+ template<class T>
+ struct CV_EXPORTS L2
+ {
+ enum { normType = NORM_L2 };
+ typedef T ValueType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ // square root of the squared L2 norm over `size` elements
+ ResultType operator()( const T *a, const T *b, int size ) const
+ {
+ return (ResultType)sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
+ }
+ };
+
+ /*
+ * Hamming distance functor - counts the bit differences between two strings - useful for the Brief descriptor
+ * bit count of A exclusive XOR'ed with B
+ */
+ struct CV_EXPORTS Hamming
+ {
+ enum { normType = NORM_HAMMING };
+ typedef unsigned char ValueType;
+ typedef int ResultType;
+
+ /** this will count the bits in a ^ b
+ */
+ ResultType operator()( const unsigned char *a, const unsigned char *b, int size ) const
+ {
+ return normHamming(a, b, size);
+ }
+ };
+
+ ////////////////////////////////// BruteForceMatcher //////////////////////////////////
+
+ // Brute-force descriptor matcher (OpenCL). Match/knnMatch/radiusMatch families come in
+ // three flavours each: *Single/*Collection (GPU outputs), static *Download/*Convert
+ // (GPU->CPU conversion), and the high-level overloads returning std::vector<DMatch>.
+ class CV_EXPORTS BruteForceMatcher_OCL_base
+ {
+ public:
+ enum DistType {L1Dist = 0, L2Dist, HammingDist};
+ explicit BruteForceMatcher_OCL_base(DistType distType = L2Dist);
+ // Add descriptors to train descriptor collection
+ void add(const std::vector<oclMat> &descCollection);
+ // Get train descriptors collection
+ const std::vector<oclMat> &getTrainDescriptors() const;
+ // Clear train descriptors collection
+ void clear();
+ // Return true if there are no train descriptors in collection
+ bool empty() const;
+
+ // Return true if the matcher supports mask in match methods
+ bool isMaskSupported() const;
+
+ // Find one best match for each query descriptor
+ void matchSingle(const oclMat &query, const oclMat &train,
+ oclMat &trainIdx, oclMat &distance,
+ const oclMat &mask = oclMat());
+
+ // Download trainIdx and distance and convert it to CPU vector with DMatch
+ static void matchDownload(const oclMat &trainIdx, const oclMat &distance, std::vector<DMatch> &matches);
+ // Convert trainIdx and distance to vector with DMatch
+ static void matchConvert(const Mat &trainIdx, const Mat &distance, std::vector<DMatch> &matches);
+
+ // Find one best match for each query descriptor
+ void match(const oclMat &query, const oclMat &train, std::vector<DMatch> &matches, const oclMat &mask = oclMat());
+
+ // Make gpu collection of trains and masks in suitable format for matchCollection function
+ void makeGpuCollection(oclMat &trainCollection, oclMat &maskCollection, const std::vector<oclMat> &masks = std::vector<oclMat>());
+
+
+ // Find one best match from train collection for each query descriptor
+ void matchCollection(const oclMat &query, const oclMat &trainCollection,
+ oclMat &trainIdx, oclMat &imgIdx, oclMat &distance,
+ const oclMat &masks = oclMat());
+
+ // Download trainIdx, imgIdx and distance and convert it to vector with DMatch
+ static void matchDownload(const oclMat &trainIdx, const oclMat &imgIdx, const oclMat &distance, std::vector<DMatch> &matches);
+ // Convert trainIdx, imgIdx and distance to vector with DMatch
+ static void matchConvert(const Mat &trainIdx, const Mat &imgIdx, const Mat &distance, std::vector<DMatch> &matches);
+
+ // Find one best match from train collection for each query descriptor.
+ void match(const oclMat &query, std::vector<DMatch> &matches, const std::vector<oclMat> &masks = std::vector<oclMat>());
+
+ // Find k best matches for each query descriptor (in increasing order of distances)
+ void knnMatchSingle(const oclMat &query, const oclMat &train,
+ oclMat &trainIdx, oclMat &distance, oclMat &allDist, int k,
+ const oclMat &mask = oclMat());
+
+ // Download trainIdx and distance and convert it to vector with DMatch
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void knnMatchDownload(const oclMat &trainIdx, const oclMat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Convert trainIdx and distance to vector with DMatch
+ static void knnMatchConvert(const Mat &trainIdx, const Mat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Find k best matches for each query descriptor (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ void knnMatch(const oclMat &query, const oclMat &train,
+ std::vector< std::vector<DMatch> > &matches, int k, const oclMat &mask = oclMat(),
+ bool compactResult = false);
+
+ // Find k best matches from train collection for each query descriptor (in increasing order of distances)
+ void knnMatch2Collection(const oclMat &query, const oclMat &trainCollection,
+ oclMat &trainIdx, oclMat &imgIdx, oclMat &distance,
+ const oclMat &maskCollection = oclMat());
+
+ // Download trainIdx and distance and convert it to vector with DMatch
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void knnMatch2Download(const oclMat &trainIdx, const oclMat &imgIdx, const oclMat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Convert trainIdx and distance to vector with DMatch
+ static void knnMatch2Convert(const Mat &trainIdx, const Mat &imgIdx, const Mat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Find k best matches for each query descriptor (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ void knnMatch(const oclMat &query, std::vector< std::vector<DMatch> > &matches, int k,
+ const std::vector<oclMat> &masks = std::vector<oclMat>(), bool compactResult = false);
+
+ // Find best matches for each query descriptor which have distance less than maxDistance.
+ // nMatches.at<int>(0, queryIdx) will contain matches count for queryIdx.
+ // note that nMatches can be greater than trainIdx.cols - it means that matcher didn't find all matches,
+ // because it didn't have enough memory.
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10),
+ // otherwise user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches
+ // Matches are not sorted.
+ void radiusMatchSingle(const oclMat &query, const oclMat &train,
+ oclMat &trainIdx, oclMat &distance, oclMat &nMatches, float maxDistance,
+ const oclMat &mask = oclMat());
+
+ // Download trainIdx, nMatches and distance and convert it to vector with DMatch.
+ // matches will be sorted in increasing order of distances.
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void radiusMatchDownload(const oclMat &trainIdx, const oclMat &distance, const oclMat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Convert trainIdx, nMatches and distance to vector with DMatch.
+ static void radiusMatchConvert(const Mat &trainIdx, const Mat &distance, const Mat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Find best matches for each query descriptor which have distance less than maxDistance
+ // in increasing order of distances).
+ void radiusMatch(const oclMat &query, const oclMat &train,
+ std::vector< std::vector<DMatch> > &matches, float maxDistance,
+ const oclMat &mask = oclMat(), bool compactResult = false);
+ // Find best matches for each query descriptor which have distance less than maxDistance.
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10),
+ // otherwise user can pass own allocated trainIdx and distance with size nQuery x nMaxMatches
+ // Matches are not sorted.
+ void radiusMatchCollection(const oclMat &query, oclMat &trainIdx, oclMat &imgIdx, oclMat &distance, oclMat &nMatches, float maxDistance,
+ const std::vector<oclMat> &masks = std::vector<oclMat>());
+ // Download trainIdx, imgIdx, nMatches and distance and convert it to vector with DMatch.
+ // matches will be sorted in increasing order of distances.
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void radiusMatchDownload(const oclMat &trainIdx, const oclMat &imgIdx, const oclMat &distance, const oclMat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Convert trainIdx, nMatches and distance to vector with DMatch.
+ static void radiusMatchConvert(const Mat &trainIdx, const Mat &imgIdx, const Mat &distance, const Mat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Find best matches from train collection for each query descriptor which have distance less than
+ // maxDistance (in increasing order of distances).
+ void radiusMatch(const oclMat &query, std::vector< std::vector<DMatch> > &matches, float maxDistance,
+ const std::vector<oclMat> &masks = std::vector<oclMat>(), bool compactResult = false);
+ DistType distType;
+ private:
+ std::vector<oclMat> trainDescCollection;
+ };
+
+ // Typed wrappers: each specialization selects the corresponding DistType for the base class.
+ template <class Distance>
+ class CV_EXPORTS BruteForceMatcher_OCL;
+
+ template <typename T>
+ class CV_EXPORTS BruteForceMatcher_OCL< L1<T> > : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BruteForceMatcher_OCL() : BruteForceMatcher_OCL_base(L1Dist) {}
+ explicit BruteForceMatcher_OCL(L1<T> /*d*/) : BruteForceMatcher_OCL_base(L1Dist) {}
+ };
+
+ template <typename T>
+ class CV_EXPORTS BruteForceMatcher_OCL< L2<T> > : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BruteForceMatcher_OCL() : BruteForceMatcher_OCL_base(L2Dist) {}
+ explicit BruteForceMatcher_OCL(L2<T> /*d*/) : BruteForceMatcher_OCL_base(L2Dist) {}
+ };
+
+ template <> class CV_EXPORTS BruteForceMatcher_OCL< Hamming > : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BruteForceMatcher_OCL() : BruteForceMatcher_OCL_base(HammingDist) {}
+ explicit BruteForceMatcher_OCL(Hamming /*d*/) : BruteForceMatcher_OCL_base(HammingDist) {}
+ };
+
+ // Convenience wrapper selecting the distance type from an OpenCV norm constant;
+ // anything other than NORM_L1/NORM_L2 falls through to HammingDist.
+ class CV_EXPORTS BFMatcher_OCL : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BFMatcher_OCL(int norm = NORM_L2) : BruteForceMatcher_OCL_base(norm == NORM_L1 ? L1Dist : norm == NORM_L2 ? L2Dist : HammingDist) {}
+ };
+
+ // Shi-Tomasi / Harris corner detector (OpenCL); parameters mirror cv::goodFeaturesToTrack.
+ class CV_EXPORTS GoodFeaturesToTrackDetector_OCL
+ {
+ public:
+ explicit GoodFeaturesToTrackDetector_OCL(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
+ int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04);
+
+ //! returns a 1-row matrix of type CV_32FC2 containing the detected corners
+ void operator ()(const oclMat& image, oclMat& corners, const oclMat& mask = oclMat());
+ //! download points of type Point2f to a vector. the vector's content will be erased
+ void downloadPoints(const oclMat &points, vector<Point2f> &points_v);
+
+ int maxCorners;
+ double qualityLevel;
+ double minDistance;
+
+ int blockSize;
+ bool useHarrisDetector;
+ double harrisK;
+ // releases the internal work buffers (eig_minmax_ and counter_ are kept)
+ void releaseMemory()
+ {
+ Dx_.release();
+ Dy_.release();
+ eig_.release();
+ minMaxbuf_.release();
+ tmpCorners_.release();
+ }
+ private:
+ oclMat Dx_;
+ oclMat Dy_;
+ oclMat eig_;
+ oclMat eig_minmax_;
+ oclMat minMaxbuf_;
+ oclMat tmpCorners_;
+ oclMat counter_;
+ };
+
+ // Inline constructor definition: simply stores the configuration parameters.
+ inline GoodFeaturesToTrackDetector_OCL::GoodFeaturesToTrackDetector_OCL(int maxCorners_, double qualityLevel_, double minDistance_,
+ int blockSize_, bool useHarrisDetector_, double harrisK_)
+ {
+ maxCorners = maxCorners_;
+ qualityLevel = qualityLevel_;
+ minDistance = minDistance_;
+ blockSize = blockSize_;
+ useHarrisDetector = useHarrisDetector_;
+ harrisK = harrisK_;
+ }
+
+ /////////////////////////////// PyrLKOpticalFlow /////////////////////////////////////
+ // Pyramidal Lucas-Kanade optical flow (sparse and dense variants), OpenCL implementation.
+ class CV_EXPORTS PyrLKOpticalFlow
+ {
+ public:
+ // default-constructs with the standard LK parameters (21x21 window, 3 levels, 30 iterations)
+ PyrLKOpticalFlow()
+ {
+ winSize = Size(21, 21);
+ maxLevel = 3;
+ iters = 30;
+ derivLambda = 0.5;
+ useInitialFlow = false;
+ minEigThreshold = 1e-4f;
+ getMinEigenVals = false;
+ isDeviceArch11_ = false;
+ }
+
+ // track sparse feature points prevPts from prevImg into nextImg
+ void sparse(const oclMat &prevImg, const oclMat &nextImg, const oclMat &prevPts, oclMat &nextPts,
+ oclMat &status, oclMat *err = 0);
+ // compute a dense flow field (u, v) between two frames
+ void dense(const oclMat &prevImg, const oclMat &nextImg, oclMat &u, oclMat &v, oclMat *err = 0);
+ Size winSize;
+ int maxLevel;
+ int iters;
+ double derivLambda;
+ bool useInitialFlow;
+ float minEigThreshold;
+ bool getMinEigenVals;
+ // frees the pyramid and derivative buffers
+ void releaseMemory()
+ {
+ dx_calcBuf_.release();
+ dy_calcBuf_.release();
+
+ prevPyr_.clear();
+ nextPyr_.clear();
+
+ dx_buf_.release();
+ dy_buf_.release();
+ }
+ private:
+ void calcSharrDeriv(const oclMat &src, oclMat &dx, oclMat &dy);
+ void buildImagePyramid(const oclMat &img0, vector<oclMat> &pyr, bool withBorder);
+
+ oclMat dx_calcBuf_;
+ oclMat dy_calcBuf_;
+
+ vector<oclMat> prevPyr_;
+ vector<oclMat> nextPyr_;
+
+ oclMat dx_buf_;
+ oclMat dy_buf_;
+ oclMat uPyr_[2];
+ oclMat vPyr_[2];
+ bool isDeviceArch11_;
+ };
+
+ // Farneback dense optical flow (polynomial expansion based), OpenCL implementation.
+ class CV_EXPORTS FarnebackOpticalFlow
+ {
+ public:
+ FarnebackOpticalFlow();
+
+ int numLevels;
+ double pyrScale;
+ bool fastPyramids;
+ int winSize;
+ int numIters;
+ int polyN;
+ double polySigma;
+ int flags;
+
+ // computes the dense flow (flowx, flowy) between frame0 and frame1
+ void operator ()(const oclMat &frame0, const oclMat &frame1, oclMat &flowx, oclMat &flowy);
+
+ void releaseMemory();
+
+ private:
+ void setGaussianBlurKernel(const float *c_gKer, int ksizeHalf);
+
+ void gaussianBlurOcl(const oclMat &src, int ksizeHalf, oclMat &dst);
+
+ void polynomialExpansionOcl(
+ const oclMat &src, int polyN, oclMat &dst);
+
+ void gaussianBlur5Ocl(
+ const oclMat &src, int ksizeHalf, oclMat &dst);
+
+ void prepareGaussian(
+ int n, double sigma, float *g, float *xg, float *xxg,
+ double &ig11, double &ig03, double &ig33, double &ig55);
+
+ void setPolynomialExpansionConsts(int n, double sigma);
+
+ void updateFlow_boxFilter(
+ const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat &flowy,
+ oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices);
+
+ void updateFlow_gaussianBlur(
+ const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat& flowy,
+ oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices);
+
+ oclMat frames_[2];
+ oclMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];
+ std::vector<oclMat> pyramid0_, pyramid1_;
+ float ig[4];
+ oclMat gMat;
+ oclMat xgMat;
+ oclMat xxgMat;
+ oclMat gKerMat;
+ };
+
+ //////////////// build warping maps ////////////////////
+ // NOTE(review): K, R, T presumably are camera intrinsic, rotation and translation matrices
+ // (as in the stitching module) — confirm against the OpenCV documentation.
+ //! builds plane warping maps
+ CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat &R, const Mat &T, float scale, oclMat &map_x, oclMat &map_y);
+ //! builds cylindrical warping maps
+ CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat &R, float scale, oclMat &map_x, oclMat &map_y);
+ //! builds spherical warping maps
+ CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat &R, float scale, oclMat &map_x, oclMat &map_y);
+ //! builds Affine warping maps
+ CV_EXPORTS void buildWarpAffineMaps(const Mat &M, bool inverse, Size dsize, oclMat &xmap, oclMat &ymap);
+
+ //! builds Perspective warping maps
+ CV_EXPORTS void buildWarpPerspectiveMaps(const Mat &M, bool inverse, Size dsize, oclMat &xmap, oclMat &ymap);
+
+ ///////////////////////////////////// interpolate frames //////////////////////////////////////////////
+ //! Interpolate frames (images) using provided optical flow (displacement field).
+ //! frame0 - frame 0 (32-bit floating point images, single channel)
+ //! frame1 - frame 1 (the same type and size)
+ //! fu - forward horizontal displacement
+ //! fv - forward vertical displacement
+ //! bu - backward horizontal displacement
+ //! bv - backward vertical displacement
+ //! pos - new frame position (presumably in [0, 1] between frame0 and frame1 — confirm)
+ //! newFrame - new frame
+ //! buf - temporary buffer, will have width x 6*height size, CV_32FC1 type and contain 6 oclMat;
+ //! occlusion masks 0, occlusion masks 1,
+ //! interpolated forward flow 0, interpolated forward flow 1,
+ //! interpolated backward flow 0, interpolated backward flow 1
+ //!
+ CV_EXPORTS void interpolateFrames(const oclMat &frame0, const oclMat &frame1,
+ const oclMat &fu, const oclMat &fv,
+ const oclMat &bu, const oclMat &bv,
+ float pos, oclMat &newFrame, oclMat &buf);
+
+ //! computes moments of the rasterized shape or a vector of points
+ //! _array should be a vector of points standing for the contour
+ CV_EXPORTS Moments ocl_moments(InputArray contour);
+ //! src should be a general image uploaded to the GPU.
+ //! the supported oclMat type are CV_8UC1, CV_16UC1, CV_16SC1, CV_32FC1 and CV_64FC1
+ //! to use type of CV_64FC1, the GPU should support CV_64FC1
+ CV_EXPORTS Moments ocl_moments(oclMat& src, bool binary);
+
+ // Block-matching stereo correspondence (SAD based), OpenCL implementation.
+ class CV_EXPORTS StereoBM_OCL
+ {
+ public:
+ enum { BASIC_PRESET = 0, PREFILTER_XSOBEL = 1 };
+
+ enum { DEFAULT_NDISP = 64, DEFAULT_WINSZ = 19 };
+
+ //! the default constructor
+ StereoBM_OCL();
+ //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size. ndisparities must be multiple of 8.
+ StereoBM_OCL(int preset, int ndisparities = DEFAULT_NDISP, int winSize = DEFAULT_WINSZ);
+
+ //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
+ //! Output disparity has CV_8U type.
+ void operator() ( const oclMat &left, const oclMat &right, oclMat &disparity);
+
+ //! Some heuristics that tries to estimate
+ // if current GPU will be faster than CPU in this algorithm.
+ // It queries current active device.
+ static bool checkIfGpuCallReasonable();
+
+ int preset;
+ int ndisp;
+ int winSize;
+
+ // If avergeTexThreshold == 0 => post processing is disabled
+ // If avergeTexThreshold != 0 then disparity is set 0 in each point (x,y) where for left image
+ // SumOfHorizontalGradiensInWindow(x, y, winSize) < (winSize * winSize) * avergeTexThreshold
+ // i.e. input left image is low textured.
+ float avergeTexThreshold;
+ private:
+ oclMat minSSD, leBuf, riBuf;
+ };
+
+ // Belief-propagation stereo correspondence, OpenCL implementation.
+ class CV_EXPORTS StereoBeliefPropagation
+ {
+ public:
+ enum { DEFAULT_NDISP = 64 };
+ enum { DEFAULT_ITERS = 5 };
+ enum { DEFAULT_LEVELS = 5 };
+ // suggests ndisp/iters/levels values for the given image size
+ static void estimateRecommendedParams(int width, int height, int &ndisp, int &iters, int &levels);
+ explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP,
+ int iters = DEFAULT_ITERS,
+ int levels = DEFAULT_LEVELS,
+ int msg_type = CV_16S);
+ StereoBeliefPropagation(int ndisp, int iters, int levels,
+ float max_data_term, float data_weight,
+ float max_disc_term, float disc_single_jump,
+ int msg_type = CV_32F);
+ // compute disparity from a rectified stereo pair
+ void operator()(const oclMat &left, const oclMat &right, oclMat &disparity);
+ // compute disparity from a user-supplied data cost
+ void operator()(const oclMat &data, oclMat &disparity);
+ int ndisp;
+ int iters;
+ int levels;
+ float max_data_term;
+ float data_weight;
+ float max_disc_term;
+ float disc_single_jump;
+ int msg_type;
+ private:
+ oclMat u, d, l, r, u2, d2, l2, r2;
+ std::vector<oclMat> datas;
+ oclMat out;
+ };
+
+ // Constant-space belief-propagation stereo correspondence, OpenCL implementation.
+ class CV_EXPORTS StereoConstantSpaceBP
+ {
+ public:
+ enum { DEFAULT_NDISP = 128 };
+ enum { DEFAULT_ITERS = 8 };
+ enum { DEFAULT_LEVELS = 4 };
+ enum { DEFAULT_NR_PLANE = 4 };
+ // suggests ndisp/iters/levels/nr_plane values for the given image size
+ static void estimateRecommendedParams(int width, int height, int &ndisp, int &iters, int &levels, int &nr_plane);
+ explicit StereoConstantSpaceBP(
+ int ndisp = DEFAULT_NDISP,
+ int iters = DEFAULT_ITERS,
+ int levels = DEFAULT_LEVELS,
+ int nr_plane = DEFAULT_NR_PLANE,
+ int msg_type = CV_32F);
+ StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane,
+ float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,
+ int min_disp_th = 0,
+ int msg_type = CV_32F);
+ // compute disparity from a rectified stereo pair
+ void operator()(const oclMat &left, const oclMat &right, oclMat &disparity);
+ int ndisp;
+ int iters;
+ int levels;
+ int nr_plane;
+ float max_data_term;
+ float data_weight;
+ float max_disc_term;
+ float disc_single_jump;
+ int min_disp_th;
+ int msg_type;
+ bool use_local_init_data_cost;
+ private:
+ oclMat u[2], d[2], l[2], r[2];
+ oclMat disp_selected_pyr[2];
+ oclMat data_cost;
+ oclMat data_cost_selected;
+ oclMat temp;
+ oclMat out;
+ };
+
+ // Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
+ //
+ // see reference:
+ // [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
+ // [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
+ class CV_EXPORTS OpticalFlowDual_TVL1_OCL
+ {
+ public:
+ OpticalFlowDual_TVL1_OCL();
+
+ // compute the dense flow (flowx, flowy) between frames I0 and I1
+ void operator ()(const oclMat& I0, const oclMat& I1, oclMat& flowx, oclMat& flowy);
+
+ void collectGarbage();
+
+ /**
+ * Time step of the numerical scheme.
+ */
+ double tau;
+
+ /**
+ * Weight parameter for the data term, attachment parameter.
+ * This is the most relevant parameter, which determines the smoothness of the output.
+ * The smaller this parameter is, the smoother the solutions we obtain.
+ * It depends on the range of motions of the images, so its value should be adapted to each image sequence.
+ */
+ double lambda;
+
+ /**
+ * Weight parameter for (u - v)^2, tightness parameter.
+ * It serves as a link between the attachment and the regularization terms.
+ * In theory, it should have a small value in order to maintain both parts in correspondence.
+ * The method is stable for a large range of values of this parameter.
+ */
+ double theta;
+
+ /**
+ * Number of scales used to create the pyramid of images.
+ */
+ int nscales;
+
+ /**
+ * Number of warpings per scale.
+ * Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale.
+ * This is a parameter that assures the stability of the method.
+ * It also affects the running time, so it is a compromise between speed and accuracy.
+ */
+ int warps;
+
+ /**
+ * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
+ * A small value will yield more accurate solutions at the expense of a slower convergence.
+ */
+ double epsilon;
+
+ /**
+ * Stopping criterion iterations number used in the numerical scheme.
+ */
+ int iterations;
+
+ bool useInitialFlow;
+
+ private:
+ // per-scale solver step
+ void procOneScale(const oclMat& I0, const oclMat& I1, oclMat& u1, oclMat& u2);
+
+ std::vector<oclMat> I0s;
+ std::vector<oclMat> I1s;
+ std::vector<oclMat> u1s;
+ std::vector<oclMat> u2s;
+
+ oclMat I1x_buf;
+ oclMat I1y_buf;
+
+ oclMat I1w_buf;
+ oclMat I1wx_buf;
+ oclMat I1wy_buf;
+
+ oclMat grad_buf;
+ oclMat rho_c_buf;
+
+ oclMat p11_buf;
+ oclMat p12_buf;
+ oclMat p21_buf;
+ oclMat p22_buf;
+
+ oclMat diff_buf;
+ oclMat norm_buf;
+ };
+ // current supported sorting methods
+ enum
+ {
+ SORT_BITONIC, // only support power-of-2 buffer size
+ SORT_SELECTION, // cannot sort duplicate keys
+ SORT_MERGE,
+ SORT_RADIX // only support signed int/float keys(CV_32S/CV_32F)
+ };
+ //! Returns the sorted result of all the elements in input based on equivalent keys.
+ //
+ // The element unit in the values to be sorted is determined from the data type,
+ // i.e., a CV_32FC2 input {a1a2, b1b2} will be considered as two elements, regardless of its
+ // matrix dimension.
+ // both keys and values will be sorted inplace
+ // Key needs to be single channel oclMat.
+ //
+ // Example:
+ // input -
+ // keys = {2, 3, 1} (CV_8UC1)
+ // values = {10,5, 4,3, 6,2} (CV_8UC2)
+ // sortByKey(keys, values, SORT_SELECTION, false);
+ // output -
+ // keys = {1, 2, 3} (CV_8UC1)
+ // values = {6,2, 10,5, 4,3} (CV_8UC2)
+ CV_EXPORTS void sortByKey(oclMat& keys, oclMat& values, int method, bool isGreaterThan = false);
+ /*!Base class for MOG and MOG2!*/
+ class CV_EXPORTS BackgroundSubtractor
+ {
+ public:
+ //! the virtual destructor
+ virtual ~BackgroundSubtractor();
+ //! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
+ virtual void operator()(const oclMat& image, oclMat& fgmask, float learningRate);
+
+ //! computes a background image
+ virtual void getBackgroundImage(oclMat& backgroundImage) const = 0;
+ };
+ /*!
+ Gaussian Mixture-based Background/Foreground Segmentation Algorithm
+
+ The class implements the following algorithm:
+ "An improved adaptive background mixture model for real-time tracking with shadow detection"
+ P. KadewTraKuPong and R. Bowden,
+ Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+ */
+ class CV_EXPORTS MOG: public cv::ocl::BackgroundSubtractor
+ {
+ public:
+ //! the default constructor
+ MOG(int nmixtures = -1);
+
+ //! re-initialization method
+ void initialize(Size frameSize, int frameType);
+
+ //! the update operator
+ void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = 0.f);
+
+ //! computes a background image which is the mean of all background gaussians
+ void getBackgroundImage(oclMat& backgroundImage) const;
+
+ //! releases all inner buffers
+ void release();
+
+ int history;
+ float varThreshold;
+ float backgroundRatio;
+ float noiseSigma;
+
+ private:
+ int nmixtures_;
+
+ Size frameSize_;
+ int frameType_;
+ int nframes_;
+
+ oclMat weight_;
+ oclMat sortKey_;
+ oclMat mean_;
+ oclMat var_;
+ };
+
+ /*!
+ The class implements the following algorithm:
+ "Improved adaptive Gaussian mixture model for background subtraction"
+ Z.Zivkovic
+ International Conference Pattern Recognition, UK, August, 2004.
+ http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
+ */
+ class CV_EXPORTS MOG2: public cv::ocl::BackgroundSubtractor
+ {
+ public:
+ //! the default constructor
+ MOG2(int nmixtures = -1);
+
+ //! re-initialization method
+ void initialize(Size frameSize, int frameType);
+
+ //! the update operator
+ void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = -1.0f);
+
+ //! computes a background image which is the mean of all background gaussians
+ void getBackgroundImage(oclMat& backgroundImage) const;
+
+ //! releases all inner buffers
+ void release();
+
+ // parameters
+ // you should call initialize after parameters changes
+
+ int history;
+
+ //! here it is the maximum allowed number of mixture components.
+ //! Actual number is determined dynamically per pixel
+ // NOTE(review): the two comment lines above describe the mixture-count limit, not
+ // varThreshold — they appear misplaced; varThreshold is described by the comment below.
+ float varThreshold;
+ // threshold on the squared Mahalanobis distance to decide if it is well described
+ // by the background model or not. Related to Cthr from the paper.
+ // This does not influence the update of the background. A typical value could be 4 sigma
+ // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+
+ /////////////////////////
+ // less important parameters - things you might change but be careful
+ ////////////////////////
+
+ float backgroundRatio;
+ // corresponds to fTB=1-cf from the paper
+ // TB - threshold when the component becomes significant enough to be included into
+ // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
+ // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+ // it is considered foreground
+ // float noiseSigma;
+ float varThresholdGen;
+
+ //corresponds to Tg - threshold on the squared Mahalan. dist. to decide
+ //when a sample is close to the existing components. If it is not close
+ //to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
+ //Smaller Tg leads to more generated components and higher Tg might make
+ //lead to small number of components but they can grow too large
+ float fVarInit;
+ float fVarMin;
+ float fVarMax;
+
+ //initial variance for the newly generated components.
+ //It will influence the speed of adaptation. A good guess should be made.
+ //A simple way is to estimate the typical standard deviation from the images.
+ //I used here 10 as a reasonable value
+ // min and max can be used to further control the variance
+ float fCT; //CT - complexity reduction prior
+ //this is related to the number of samples needed to accept that a component
+ //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+ //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+ //shadow detection parameters
+ bool bShadowDetection; //default 1 - do shadow detection
+ unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value
+ float fTau;
+ // Tau - shadow threshold. The shadow is detected if the pixel is darker
+ //version of the background. Tau is a threshold on how much darker the shadow can be.
+ //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
+ //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
+
+ private:
+ int nmixtures_;
+
+ Size frameSize_;
+ int frameType_;
+ int nframes_;
+
+ oclMat weight_;
+ oclMat variance_;
+ oclMat mean_;
+
+ oclMat bgmodelUsedModes_; //keep track of number of modes per pixel
+ };
+
+ /*!***************Kalman Filter*************!*/
+ class CV_EXPORTS KalmanFilter
+ {
+ public:
+ KalmanFilter();
+ //! the full constructor taking the dimensionality of the state, of the measurement and of the control vector
+ KalmanFilter(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F);
+ //! re-initializes Kalman filter. The previous content is destroyed.
+ void init(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F);
+
+ const oclMat& predict(const oclMat& control=oclMat());
+ const oclMat& correct(const oclMat& measurement);
+
+ oclMat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k)
+ oclMat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
+ oclMat transitionMatrix; //!< state transition matrix (A)
+ oclMat controlMatrix; //!< control matrix (B) (not used if there is no control)
+ oclMat measurementMatrix; //!< measurement matrix (H)
+ oclMat processNoiseCov; //!< process noise covariance matrix (Q)
+ oclMat measurementNoiseCov;//!< measurement noise covariance matrix (R)
+ oclMat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)*/
+ oclMat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
+ oclMat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
+ private:
+ oclMat temp1;
+ oclMat temp2;
+ oclMat temp3;
+ oclMat temp4;
+ oclMat temp5;
+ };
+
+ /*!***************K Nearest Neighbour*************!*/
+ class CV_EXPORTS KNearestNeighbour: public CvKNearest
+ {
+ public:
+ KNearestNeighbour();
+ ~KNearestNeighbour();
+
+ bool train(const Mat& trainData, Mat& labels, Mat& sampleIdx = Mat().setTo(Scalar::all(0)),
+ bool isRegression = false, int max_k = 32, bool updateBase = false);
+
+ void clear();
+
+ void find_nearest(const oclMat& samples, int k, oclMat& lables);
+
+ private:
+ oclMat samples_ocl;
+ };
+
+ /*!*************** SVM *************!*/
+ class CV_EXPORTS CvSVM_OCL : public CvSVM
+ {
+ public:
+ CvSVM_OCL();
+
+ CvSVM_OCL(const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(),
+ CvSVMParams params=CvSVMParams());
+ CV_WRAP float predict( const int row_index, Mat& src, bool returnDFVal=false ) const;
+ CV_WRAP void predict( cv::InputArray samples, cv::OutputArray results ) const;
+ CV_WRAP float predict( const cv::Mat& sample, bool returnDFVal=false ) const;
+ float predict( const CvMat* samples, CV_OUT CvMat* results ) const;
+
+ protected:
+ float predict( const int row_index, int row_len, Mat& src, bool returnDFVal=false ) const;
+ void create_kernel();
+ void create_solver();
+ };
+
+ /*!*************** END *************!*/
+ }
+}
+#if defined _MSC_VER && _MSC_VER >= 1200
+# pragma warning( push)
+# pragma warning( disable: 4267)
+#endif
+#include "opencv2/ocl/matrix_operations.hpp"
+#if defined _MSC_VER && _MSC_VER >= 1200
+# pragma warning( pop)
+#endif
+
+#endif /* __OPENCV_OCL_HPP__ */
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv.hpp
new file mode 100644
index 00000000..f76da92f
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv.hpp
@@ -0,0 +1,83 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_ALL_HPP__
+#define __OPENCV_ALL_HPP__
+
+#include "opencv2/opencv_modules.hpp"
+
+#include "opencv2/core/core_c.h"
+#include "opencv2/core/core.hpp"
+#ifdef HAVE_OPENCV_FLANN
+#include "opencv2/flann/miniflann.hpp"
+#endif
+#ifdef HAVE_OPENCV_IMGPROC
+#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/imgproc/imgproc.hpp"
+#endif
+#ifdef HAVE_OPENCV_PHOTO
+#include "opencv2/photo/photo.hpp"
+#endif
+#ifdef HAVE_OPENCV_VIDEO
+#include "opencv2/video/video.hpp"
+#endif
+#ifdef HAVE_OPENCV_FEATURES2D
+#include "opencv2/features2d/features2d.hpp"
+#endif
+#ifdef HAVE_OPENCV_OBJDETECT
+#include "opencv2/objdetect/objdetect.hpp"
+#endif
+#ifdef HAVE_OPENCV_CALIB3D
+#include "opencv2/calib3d/calib3d.hpp"
+#endif
+#ifdef HAVE_OPENCV_ML
+#include "opencv2/ml/ml.hpp"
+#endif
+#ifdef HAVE_OPENCV_HIGHGUI
+#include "opencv2/highgui/highgui_c.h"
+#include "opencv2/highgui/highgui.hpp"
+#endif
+#ifdef HAVE_OPENCV_CONTRIB
+#include "opencv2/contrib/contrib.hpp"
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv_modules.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv_modules.hpp
new file mode 100644
index 00000000..4425eec5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/opencv_modules.hpp
@@ -0,0 +1,29 @@
+/*
+ * ** File generated automatically, do not modify **
+ *
+ * This file defines the list of modules available in current build configuration
+ *
+ *
+*/
+
+#define HAVE_OPENCV_CALIB3D
+#define HAVE_OPENCV_CONTRIB
+#define HAVE_OPENCV_CORE
+#define HAVE_OPENCV_FEATURES2D
+#define HAVE_OPENCV_FLANN
+#define HAVE_OPENCV_GPU
+#define HAVE_OPENCV_HIGHGUI
+#define HAVE_OPENCV_IMGPROC
+#define HAVE_OPENCV_LEGACY
+#define HAVE_OPENCV_ML
+#define HAVE_OPENCV_NONFREE
+#define HAVE_OPENCV_OBJDETECT
+#define HAVE_OPENCV_OCL
+#define HAVE_OPENCV_PHOTO
+#define HAVE_OPENCV_STITCHING
+#define HAVE_OPENCV_SUPERRES
+#define HAVE_OPENCV_TS
+#define HAVE_OPENCV_VIDEO
+#define HAVE_OPENCV_VIDEOSTAB
+
+
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo.hpp
new file mode 100644
index 00000000..521a1ade
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/photo/photo.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo.hpp
new file mode 100644
index 00000000..66973772
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo.hpp
@@ -0,0 +1,91 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_PHOTO_HPP__
+#define __OPENCV_PHOTO_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+
+#include "opencv2/photo/photo_c.h"
+
+#ifdef __cplusplus
+
+/*! \namespace cv
+ Namespace where all the C++ OpenCV functionality resides
+ */
+namespace cv
+{
+
+//! the inpainting algorithm
+enum
+{
+ INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm
+ INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm
+};
+
+//! restores the damaged image areas using one of the available intpainting algorithms
+CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
+ OutputArray dst, double inpaintRadius, int flags );
+
+
+CV_EXPORTS_W void fastNlMeansDenoising( InputArray src, OutputArray dst, float h = 3,
+ int templateWindowSize = 7, int searchWindowSize = 21);
+
+CV_EXPORTS_W void fastNlMeansDenoisingColored( InputArray src, OutputArray dst,
+ float h = 3, float hColor = 3,
+ int templateWindowSize = 7, int searchWindowSize = 21);
+
+CV_EXPORTS_W void fastNlMeansDenoisingMulti( InputArrayOfArrays srcImgs, OutputArray dst,
+ int imgToDenoiseIndex, int temporalWindowSize,
+ float h = 3, int templateWindowSize = 7, int searchWindowSize = 21);
+
+CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, OutputArray dst,
+ int imgToDenoiseIndex, int temporalWindowSize,
+ float h = 3, float hColor = 3,
+ int templateWindowSize = 7, int searchWindowSize = 21);
+
+}
+
+#endif //__cplusplus
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo_c.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo_c.h
new file mode 100644
index 00000000..4ca05f25
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/photo/photo_c.h
@@ -0,0 +1,69 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_PHOTO_C_H__
+#define __OPENCV_PHOTO_C_H__
+
+#include "opencv2/core/core_c.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inpainting algorithms */
+enum
+{
+ CV_INPAINT_NS =0,
+ CV_INPAINT_TELEA =1
+};
+
+
+/* Inpaints the selected region in the image */
+CVAPI(void) cvInpaint( const CvArr* src, const CvArr* inpaint_mask,
+ CvArr* dst, double inpaintRange, int flags );
+
+
+#ifdef __cplusplus
+} //extern "C"
+#endif
+
+#endif //__OPENCV_PHOTO_C_H__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching.hpp
new file mode 100644
index 00000000..ed65bb39
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/stitching/stitcher.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/autocalib.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/autocalib.hpp
new file mode 100644
index 00000000..feb53494
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/autocalib.hpp
@@ -0,0 +1,65 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_AUTOCALIB_HPP__
+#define __OPENCV_STITCHING_AUTOCALIB_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "matchers.hpp"
+
+namespace cv {
+namespace detail {
+
+// See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
+// by Heung-Yeung Shum and Richard Szeliski.
+void CV_EXPORTS focalsFromHomography(const Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
+
+void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures> &features,
+ const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<double> &focals);
+
+bool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat> &Hs, Mat &K);
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_AUTOCALIB_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/blenders.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/blenders.hpp
new file mode 100644
index 00000000..bd93a717
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/blenders.hpp
@@ -0,0 +1,137 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_BLENDERS_HPP__
+#define __OPENCV_STITCHING_BLENDERS_HPP__
+
+#include "opencv2/core/core.hpp"
+
+namespace cv {
+namespace detail {
+
+
+// Simple blender which puts one image over another
+class CV_EXPORTS Blender
+{
+public:
+ virtual ~Blender() {}
+
+ enum { NO, FEATHER, MULTI_BAND };
+ static Ptr<Blender> createDefault(int type, bool try_gpu = false);
+
+ void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);
+ virtual void prepare(Rect dst_roi);
+ virtual void feed(const Mat &img, const Mat &mask, Point tl);
+ virtual void blend(Mat &dst, Mat &dst_mask);
+
+protected:
+ Mat dst_, dst_mask_;
+ Rect dst_roi_;
+};
+
+
+class CV_EXPORTS FeatherBlender : public Blender
+{
+public:
+ FeatherBlender(float sharpness = 0.02f);
+
+ float sharpness() const { return sharpness_; }
+ void setSharpness(float val) { sharpness_ = val; }
+
+ void prepare(Rect dst_roi);
+ void feed(const Mat &img, const Mat &mask, Point tl);
+ void blend(Mat &dst, Mat &dst_mask);
+
+ // Creates weight maps for fixed set of source images by their masks and top-left corners.
+ // Final image can be obtained by simple weighting of the source images.
+ Rect createWeightMaps(const std::vector<Mat> &masks, const std::vector<Point> &corners,
+ std::vector<Mat> &weight_maps);
+
+private:
+ float sharpness_;
+ Mat weight_map_;
+ Mat dst_weight_map_;
+};
+
+inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }
+
+
+class CV_EXPORTS MultiBandBlender : public Blender
+{
+public:
+ MultiBandBlender(int try_gpu = false, int num_bands = 5, int weight_type = CV_32F);
+
+ int numBands() const { return actual_num_bands_; }
+ void setNumBands(int val) { actual_num_bands_ = val; }
+
+ void prepare(Rect dst_roi);
+ void feed(const Mat &img, const Mat &mask, Point tl);
+ void blend(Mat &dst, Mat &dst_mask);
+
+private:
+ int actual_num_bands_, num_bands_;
+ std::vector<Mat> dst_pyr_laplace_;
+ std::vector<Mat> dst_band_weights_;
+ Rect dst_roi_final_;
+ bool can_use_gpu_;
+ int weight_type_; //CV_32F or CV_16S
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Auxiliary functions
+
+void CV_EXPORTS normalizeUsingWeightMap(const Mat& weight, Mat& src);
+
+void CV_EXPORTS createWeightMap(const Mat& mask, float sharpness, Mat& weight);
+
+void CV_EXPORTS createLaplacePyr(const Mat &img, int num_levels, std::vector<Mat>& pyr);
+void CV_EXPORTS createLaplacePyrGpu(const Mat &img, int num_levels, std::vector<Mat>& pyr);
+
+// Restores source image
+void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<Mat>& pyr);
+void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<Mat>& pyr);
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_BLENDERS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/camera.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/camera.hpp
new file mode 100644
index 00000000..a74abcba
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/camera.hpp
@@ -0,0 +1,69 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_CAMERA_HPP__
+#define __OPENCV_STITCHING_CAMERA_HPP__
+
+#include "opencv2/core/core.hpp"
+
+namespace cv {
+namespace detail {
+
+struct CV_EXPORTS CameraParams
+{
+ CameraParams();
+ CameraParams(const CameraParams& other);
+ const CameraParams& operator =(const CameraParams& other);
+ Mat K() const;
+
+ double focal; // Focal length
+ double aspect; // Aspect ratio
+ double ppx; // Principal point X
+ double ppy; // Principal point Y
+ Mat R; // Rotation
+ Mat t; // Translation
+};
+
+} // namespace detail
+} // namespace cv
+
+#endif // #ifndef __OPENCV_STITCHING_CAMERA_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/exposure_compensate.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/exposure_compensate.hpp
new file mode 100644
index 00000000..49676fe9
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/exposure_compensate.hpp
@@ -0,0 +1,106 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__
+#define __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__
+
+#include "opencv2/core/core.hpp"
+
+namespace cv {
+namespace detail {
+
+class CV_EXPORTS ExposureCompensator
+{
+public:
+ virtual ~ExposureCompensator() {}
+
+ enum { NO, GAIN, GAIN_BLOCKS };
+ static Ptr<ExposureCompensator> createDefault(int type);
+
+ void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
+ const std::vector<Mat> &masks);
+ virtual void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
+ const std::vector<std::pair<Mat,uchar> > &masks) = 0;
+ virtual void apply(int index, Point corner, Mat &image, const Mat &mask) = 0;
+};
+
+
+class CV_EXPORTS NoExposureCompensator : public ExposureCompensator
+{
+public:
+ void feed(const std::vector<Point> &/*corners*/, const std::vector<Mat> &/*images*/,
+ const std::vector<std::pair<Mat,uchar> > &/*masks*/) {};
+ void apply(int /*index*/, Point /*corner*/, Mat &/*image*/, const Mat &/*mask*/) {};
+};
+
+
+class CV_EXPORTS GainCompensator : public ExposureCompensator
+{
+public:
+ void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
+ const std::vector<std::pair<Mat,uchar> > &masks);
+ void apply(int index, Point corner, Mat &image, const Mat &mask);
+ std::vector<double> gains() const;
+
+private:
+ Mat_<double> gains_;
+};
+
+
+class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator
+{
+public:
+ BlocksGainCompensator(int bl_width = 32, int bl_height = 32)
+ : bl_width_(bl_width), bl_height_(bl_height) {}
+ void feed(const std::vector<Point> &corners, const std::vector<Mat> &images,
+ const std::vector<std::pair<Mat,uchar> > &masks);
+ void apply(int index, Point corner, Mat &image, const Mat &mask);
+
+private:
+ int bl_width_, bl_height_;
+ std::vector<Mat_<float> > gain_maps_;
+};
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/matchers.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/matchers.hpp
new file mode 100644
index 00000000..f319df12
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/matchers.hpp
@@ -0,0 +1,192 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_MATCHERS_HPP__
+#define __OPENCV_STITCHING_MATCHERS_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/gpumat.hpp"
+#include "opencv2/features2d/features2d.hpp"
+
+#include "opencv2/opencv_modules.hpp"
+
+#if defined(HAVE_OPENCV_NONFREE)
+ #include "opencv2/nonfree/gpu.hpp"
+#endif
+
+namespace cv {
+namespace detail {
+
+struct CV_EXPORTS ImageFeatures
+{
+ int img_idx;
+ Size img_size;
+ std::vector<KeyPoint> keypoints;
+ Mat descriptors;
+};
+
+
+class CV_EXPORTS FeaturesFinder
+{
+public:
+ virtual ~FeaturesFinder() {}
+ void operator ()(const Mat &image, ImageFeatures &features);
+ void operator ()(const Mat &image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
+ virtual void collectGarbage() {}
+
+protected:
+ virtual void find(const Mat &image, ImageFeatures &features) = 0;
+};
+
+
+class CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder
+{
+public:
+ SurfFeaturesFinder(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4,
+ int num_octaves_descr = /*4*/3, int num_layers_descr = /*2*/4);
+
+private:
+ void find(const Mat &image, ImageFeatures &features);
+
+ Ptr<FeatureDetector> detector_;
+ Ptr<DescriptorExtractor> extractor_;
+ Ptr<Feature2D> surf;
+};
+
+class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder
+{
+public:
+ OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5);
+
+private:
+ void find(const Mat &image, ImageFeatures &features);
+
+ Ptr<ORB> orb;
+ Size grid_size;
+};
+
+
+#if defined(HAVE_OPENCV_NONFREE)
+class CV_EXPORTS SurfFeaturesFinderGpu : public FeaturesFinder
+{
+public:
+ SurfFeaturesFinderGpu(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4,
+ int num_octaves_descr = 4, int num_layers_descr = 2);
+
+ void collectGarbage();
+
+private:
+ void find(const Mat &image, ImageFeatures &features);
+
+ gpu::GpuMat image_;
+ gpu::GpuMat gray_image_;
+ gpu::SURF_GPU surf_;
+ gpu::GpuMat keypoints_;
+ gpu::GpuMat descriptors_;
+#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
+ int num_octaves_, num_layers_;
+ int num_octaves_descr_, num_layers_descr_;
+#endif
+};
+#endif
+
+
+struct CV_EXPORTS MatchesInfo
+{
+ MatchesInfo();
+ MatchesInfo(const MatchesInfo &other);
+ const MatchesInfo& operator =(const MatchesInfo &other);
+
+ int src_img_idx, dst_img_idx; // Images indices (optional)
+ std::vector<DMatch> matches;
+ std::vector<uchar> inliers_mask; // Geometrically consistent matches mask
+ int num_inliers; // Number of geometrically consistent matches
+ Mat H; // Estimated homography
+ double confidence; // Confidence two images are from the same panorama
+};
+
+
+class CV_EXPORTS FeaturesMatcher
+{
+public:
+ virtual ~FeaturesMatcher() {}
+
+ void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
+ MatchesInfo& matches_info) { match(features1, features2, matches_info); }
+
+ void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
+ const cv::Mat &mask = cv::Mat());
+
+ bool isThreadSafe() const { return is_thread_safe_; }
+
+ virtual void collectGarbage() {}
+
+protected:
+ FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}
+
+ virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,
+ MatchesInfo& matches_info) = 0;
+
+ bool is_thread_safe_;
+};
+
+
+class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher
+{
+public:
+ BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f, int num_matches_thresh1 = 6,
+ int num_matches_thresh2 = 6);
+
+ void collectGarbage();
+
+protected:
+ void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info);
+
+ int num_matches_thresh1_;
+ int num_matches_thresh2_;
+ Ptr<FeaturesMatcher> impl_;
+};
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_MATCHERS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/motion_estimators.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/motion_estimators.hpp
new file mode 100644
index 00000000..9ae11021
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/motion_estimators.hpp
@@ -0,0 +1,205 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__
+#define __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "matchers.hpp"
+#include "util.hpp"
+#include "camera.hpp"
+
+namespace cv {
+namespace detail {
+
+class CV_EXPORTS Estimator
+{
+public:
+ virtual ~Estimator() {}
+
+ void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras)
+ { estimate(features, pairwise_matches, cameras); }
+
+protected:
+ virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras) = 0;
+};
+
+
+class CV_EXPORTS HomographyBasedEstimator : public Estimator
+{
+public:
+ HomographyBasedEstimator(bool is_focals_estimated = false)
+ : is_focals_estimated_(is_focals_estimated) {}
+
+private:
+ void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras);
+
+ bool is_focals_estimated_;
+};
+
+
+class CV_EXPORTS BundleAdjusterBase : public Estimator
+{
+public:
+ const Mat refinementMask() const { return refinement_mask_.clone(); }
+ void setRefinementMask(const Mat &mask)
+ {
+ CV_Assert(mask.type() == CV_8U && mask.size() == Size(3, 3));
+ refinement_mask_ = mask.clone();
+ }
+
+ double confThresh() const { return conf_thresh_; }
+ void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
+
+ CvTermCriteria termCriteria() { return term_criteria_; }
+ void setTermCriteria(const CvTermCriteria& term_criteria) { term_criteria_ = term_criteria; }
+
+protected:
+ BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
+ : num_params_per_cam_(num_params_per_cam),
+ num_errs_per_measurement_(num_errs_per_measurement)
+ {
+ setRefinementMask(Mat::ones(3, 3, CV_8U));
+ setConfThresh(1.);
+ setTermCriteria(cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));
+ }
+
+ // Runs bundle adjustment
+ virtual void estimate(const std::vector<ImageFeatures> &features,
+ const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras);
+
+ virtual void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) = 0;
+ virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;
+ virtual void calcError(Mat &err) = 0;
+ virtual void calcJacobian(Mat &jac) = 0;
+
+ // 3x3 8U mask, where 0 means don't refine respective parameter, != 0 means refine
+ Mat refinement_mask_;
+
+ int num_images_;
+ int total_num_matches_;
+
+ int num_params_per_cam_;
+ int num_errs_per_measurement_;
+
+ const ImageFeatures *features_;
+ const MatchesInfo *pairwise_matches_;
+
+ // Threshold to filter out poorly matched image pairs
+ double conf_thresh_;
+
+ //Levenberg–Marquardt algorithm termination criteria
+ CvTermCriteria term_criteria_;
+
+ // Camera parameters matrix (CV_64F)
+ Mat cam_params_;
+
+ // Connected images pairs
+ std::vector<std::pair<int,int> > edges_;
+};
+
+
+// Minimizes reprojection error.
+// It can estimate focal length, aspect ratio, principal point.
+// You can influence them only via the refinement mask.
+class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase
+{
+public:
+ BundleAdjusterReproj() : BundleAdjusterBase(7, 2) {}
+
+private:
+ void setUpInitialCameraParams(const std::vector<CameraParams> &cameras);
+ void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const;
+ void calcError(Mat &err);
+ void calcJacobian(Mat &jac);
+
+ Mat err1_, err2_;
+};
+
+
+// Minimizes sum of ray-to-ray distances.
+// It can estimate focal length. It ignores the refinement mask for now.
+class CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase
+{
+public:
+ BundleAdjusterRay() : BundleAdjusterBase(4, 3) {}
+
+private:
+ void setUpInitialCameraParams(const std::vector<CameraParams> &cameras);
+ void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const;
+ void calcError(Mat &err);
+ void calcJacobian(Mat &jac);
+
+ Mat err1_, err2_;
+};
+
+
+enum WaveCorrectKind
+{
+ WAVE_CORRECT_HORIZ,
+ WAVE_CORRECT_VERT
+};
+
+void CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind);
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Auxiliary functions
+
+// Returns matches graph representation in DOT language
+std::string CV_EXPORTS matchesGraphAsString(std::vector<std::string> &pathes, std::vector<MatchesInfo> &pairwise_matches,
+ float conf_threshold);
+
+std::vector<int> CV_EXPORTS leaveBiggestComponent(std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
+ float conf_threshold);
+
+void CV_EXPORTS findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches,
+ Graph &span_tree, std::vector<int> &centers);
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/seam_finders.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/seam_finders.hpp
new file mode 100644
index 00000000..5b1d5d98
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/seam_finders.hpp
@@ -0,0 +1,267 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_SEAM_FINDERS_HPP__
+#define __OPENCV_STITCHING_SEAM_FINDERS_HPP__
+
+#include <set>
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/gpumat.hpp"
+
+namespace cv {
+namespace detail {
+
+class CV_EXPORTS SeamFinder
+{
+public:
+ virtual ~SeamFinder() {}
+ virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
+ std::vector<Mat> &masks) = 0;
+};
+
+
+class CV_EXPORTS NoSeamFinder : public SeamFinder
+{
+public:
+ void find(const std::vector<Mat>&, const std::vector<Point>&, std::vector<Mat>&) {}
+};
+
+
+class CV_EXPORTS PairwiseSeamFinder : public SeamFinder
+{
+public:
+ virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
+ std::vector<Mat> &masks);
+
+protected:
+ void run();
+ virtual void findInPair(size_t first, size_t second, Rect roi) = 0;
+
+ std::vector<Mat> images_;
+ std::vector<Size> sizes_;
+ std::vector<Point> corners_;
+ std::vector<Mat> masks_;
+};
+
+
+class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder
+{
+public:
+ virtual void find(const std::vector<Size> &size, const std::vector<Point> &corners,
+ std::vector<Mat> &masks);
+private:
+ void findInPair(size_t first, size_t second, Rect roi);
+};
+
+
+class CV_EXPORTS DpSeamFinder : public SeamFinder
+{
+public:
+ enum CostFunction { COLOR, COLOR_GRAD };
+
+ DpSeamFinder(CostFunction costFunc = COLOR);
+
+ CostFunction costFunction() const { return costFunc_; }
+ void setCostFunction(CostFunction val) { costFunc_ = val; }
+
+ virtual void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
+ std::vector<Mat> &masks);
+
+private:
+ enum ComponentState
+ {
+ FIRST = 1, SECOND = 2, INTERS = 4,
+ INTERS_FIRST = INTERS | FIRST,
+ INTERS_SECOND = INTERS | SECOND
+ };
+
+ class ImagePairLess
+ {
+ public:
+ ImagePairLess(const std::vector<Mat> &images, const std::vector<Point> &corners)
+ : src_(&images[0]), corners_(&corners[0]) {}
+
+ bool operator() (const std::pair<size_t, size_t> &l, const std::pair<size_t, size_t> &r) const
+ {
+ Point c1 = corners_[l.first] + Point(src_[l.first].cols / 2, src_[l.first].rows / 2);
+ Point c2 = corners_[l.second] + Point(src_[l.second].cols / 2, src_[l.second].rows / 2);
+ int d1 = (c1 - c2).dot(c1 - c2);
+
+ c1 = corners_[r.first] + Point(src_[r.first].cols / 2, src_[r.first].rows / 2);
+ c2 = corners_[r.second] + Point(src_[r.second].cols / 2, src_[r.second].rows / 2);
+ int d2 = (c1 - c2).dot(c1 - c2);
+
+ return d1 < d2;
+ }
+
+ private:
+ const Mat *src_;
+ const Point *corners_;
+ };
+
+ class ClosePoints
+ {
+ public:
+ ClosePoints(int minDist) : minDist_(minDist) {}
+
+ bool operator() (const Point &p1, const Point &p2) const
+ {
+ int dist2 = (p1.x-p2.x) * (p1.x-p2.x) + (p1.y-p2.y) * (p1.y-p2.y);
+ return dist2 < minDist_ * minDist_;
+ }
+
+ private:
+ int minDist_;
+ };
+
+ void process(
+ const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);
+
+ void findComponents();
+
+ void findEdges();
+
+ void resolveConflicts(
+ const Mat &image1, const Mat &image2, Point tl1, Point tl2, Mat &mask1, Mat &mask2);
+
+ void computeGradients(const Mat &image1, const Mat &image2);
+
+ bool hasOnlyOneNeighbor(int comp);
+
+ bool closeToContour(int y, int x, const Mat_<uchar> &contourMask);
+
+ bool getSeamTips(int comp1, int comp2, Point &p1, Point &p2);
+
+ void computeCosts(
+ const Mat &image1, const Mat &image2, Point tl1, Point tl2,
+ int comp, Mat_<float> &costV, Mat_<float> &costH);
+
+ bool estimateSeam(
+ const Mat &image1, const Mat &image2, Point tl1, Point tl2, int comp,
+ Point p1, Point p2, std::vector<Point> &seam, bool &isHorizontal);
+
+ void updateLabelsUsingSeam(
+ int comp1, int comp2, const std::vector<Point> &seam, bool isHorizontalSeam);
+
+ CostFunction costFunc_;
+
+ // processing images pair data
+ Point unionTl_, unionBr_;
+ Size unionSize_;
+ Mat_<uchar> mask1_, mask2_;
+ Mat_<uchar> contour1mask_, contour2mask_;
+ Mat_<float> gradx1_, grady1_;
+ Mat_<float> gradx2_, grady2_;
+
+ // components data
+ int ncomps_;
+ Mat_<int> labels_;
+ std::vector<ComponentState> states_;
+ std::vector<Point> tls_, brs_;
+ std::vector<std::vector<Point> > contours_;
+ std::set<std::pair<int, int> > edges_;
+};
+
+
+class CV_EXPORTS GraphCutSeamFinderBase
+{
+public:
+ enum { COST_COLOR, COST_COLOR_GRAD };
+};
+
+
+class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
+{
+public:
+ GraphCutSeamFinder(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,
+ float bad_region_penalty = 1000.f);
+
+ ~GraphCutSeamFinder();
+
+ void find(const std::vector<Mat> &src, const std::vector<Point> &corners,
+ std::vector<Mat> &masks);
+
+private:
+ // To avoid GCGraph dependency
+ class Impl;
+ Ptr<PairwiseSeamFinder> impl_;
+};
+
+
+class CV_EXPORTS GraphCutSeamFinderGpu : public GraphCutSeamFinderBase, public PairwiseSeamFinder
+{
+public:
+ GraphCutSeamFinderGpu(int cost_type = COST_COLOR_GRAD, float terminal_cost = 10000.f,
+ float bad_region_penalty = 1000.f)
+#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
+ : cost_type_(cost_type),
+ terminal_cost_(terminal_cost),
+ bad_region_penalty_(bad_region_penalty)
+#endif
+ {
+ (void)cost_type;
+ (void)terminal_cost;
+ (void)bad_region_penalty;
+ }
+
+ void find(const std::vector<cv::Mat> &src, const std::vector<cv::Point> &corners,
+ std::vector<cv::Mat> &masks);
+ void findInPair(size_t first, size_t second, Rect roi);
+
+private:
+ void setGraphWeightsColor(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &mask1, const cv::Mat &mask2,
+ cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom);
+ void setGraphWeightsColorGrad(const cv::Mat &img1, const cv::Mat &img2, const cv::Mat &dx1, const cv::Mat &dx2,
+ const cv::Mat &dy1, const cv::Mat &dy2, const cv::Mat &mask1, const cv::Mat &mask2,
+ cv::Mat &terminals, cv::Mat &leftT, cv::Mat &rightT, cv::Mat &top, cv::Mat &bottom);
+ std::vector<Mat> dx_, dy_;
+#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
+ int cost_type_;
+ float terminal_cost_;
+ float bad_region_penalty_;
+#endif
+};
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_SEAM_FINDERS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util.hpp
new file mode 100644
index 00000000..a8ba8161
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util.hpp
@@ -0,0 +1,162 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_UTIL_HPP__
+#define __OPENCV_STITCHING_UTIL_HPP__
+
+#include <list>
+#include "opencv2/core/core.hpp"
+
+#define ENABLE_LOG 0
+
+// TODO remove LOG macros, add logging class
+#if ENABLE_LOG
+#ifdef ANDROID
+ #include <iostream>
+ #include <sstream>
+ #include <android/log.h>
+ #define LOG_STITCHING_MSG(msg) \
+ do { \
+ std::stringstream _os; \
+ _os << msg; \
+ __android_log_print(ANDROID_LOG_DEBUG, "STITCHING", "%s", _os.str().c_str()); \
+ } while(0);
+#else
+ #include <iostream>
+ #define LOG_STITCHING_MSG(msg) for(;;) { std::cout << msg; std::cout.flush(); break; }
+#endif
+#else
+ #define LOG_STITCHING_MSG(msg)
+#endif
+
+#define LOG_(_level, _msg) \
+ for(;;) \
+ { \
+ if ((_level) >= ::cv::detail::stitchingLogLevel()) \
+ { \
+ LOG_STITCHING_MSG(_msg); \
+ } \
+ break; \
+ }
+
+
+#define LOG(msg) LOG_(1, msg)
+#define LOG_CHAT(msg) LOG_(0, msg)
+
+#define LOGLN(msg) LOG(msg << std::endl)
+#define LOGLN_CHAT(msg) LOG_CHAT(msg << std::endl)
+
+//#if DEBUG_LOG_CHAT
+// #define LOG_CHAT(msg) LOG(msg)
+// #define LOGLN_CHAT(msg) LOGLN(msg)
+//#else
+// #define LOG_CHAT(msg) do{}while(0)
+// #define LOGLN_CHAT(msg) do{}while(0)
+//#endif
+
+namespace cv {
+namespace detail {
+
+class CV_EXPORTS DisjointSets
+{
+public:
+ DisjointSets(int elem_count = 0) { createOneElemSets(elem_count); }
+
+ void createOneElemSets(int elem_count);
+ int findSetByElem(int elem);
+ int mergeSets(int set1, int set2);
+
+ std::vector<int> parent;
+ std::vector<int> size;
+
+private:
+ std::vector<int> rank_;
+};
+
+
+struct CV_EXPORTS GraphEdge
+{
+ GraphEdge(int from, int to, float weight);
+ bool operator <(const GraphEdge& other) const { return weight < other.weight; }
+ bool operator >(const GraphEdge& other) const { return weight > other.weight; }
+
+ int from, to;
+ float weight;
+};
+
+inline GraphEdge::GraphEdge(int _from, int _to, float _weight) : from(_from), to(_to), weight(_weight) {}
+
+
+class CV_EXPORTS Graph
+{
+public:
+ Graph(int num_vertices = 0) { create(num_vertices); }
+ void create(int num_vertices) { edges_.assign(num_vertices, std::list<GraphEdge>()); }
+ int numVertices() const { return static_cast<int>(edges_.size()); }
+ void addEdge(int from, int to, float weight);
+ template <typename B> B forEach(B body) const;
+ template <typename B> B walkBreadthFirst(int from, B body) const;
+
+private:
+ std::vector< std::list<GraphEdge> > edges_;
+};
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Auxiliary functions
+
+CV_EXPORTS bool overlapRoi(Point tl1, Point tl2, Size sz1, Size sz2, Rect &roi);
+CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Mat> &images);
+CV_EXPORTS Rect resultRoi(const std::vector<Point> &corners, const std::vector<Size> &sizes);
+CV_EXPORTS Point resultTl(const std::vector<Point> &corners);
+
+// Returns random 'count' element subset of the {0,1,...,size-1} set
+CV_EXPORTS void selectRandomSubset(int count, int size, std::vector<int> &subset);
+
+CV_EXPORTS int& stitchingLogLevel();
+
+} // namespace detail
+} // namespace cv
+
+#include "util_inl.hpp"
+
+#endif // __OPENCV_STITCHING_UTIL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util_inl.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util_inl.hpp
new file mode 100644
index 00000000..ab5d94b4
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/util_inl.hpp
@@ -0,0 +1,127 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_UTIL_INL_HPP__
+#define __OPENCV_STITCHING_UTIL_INL_HPP__
+
+#include <queue>
+#include "opencv2/core/core.hpp"
+#include "util.hpp" // Make your IDE see declarations
+
+namespace cv {
+namespace detail {
+
+template <typename B>
+B Graph::forEach(B body) const
+{
+ for (int i = 0; i < numVertices(); ++i)
+ {
+ std::list<GraphEdge>::const_iterator edge = edges_[i].begin();
+ for (; edge != edges_[i].end(); ++edge)
+ body(*edge);
+ }
+ return body;
+}
+
+
+template <typename B>
+B Graph::walkBreadthFirst(int from, B body) const
+{
+ std::vector<bool> was(numVertices(), false);
+ std::queue<int> vertices;
+
+ was[from] = true;
+ vertices.push(from);
+
+ while (!vertices.empty())
+ {
+ int vertex = vertices.front();
+ vertices.pop();
+
+ std::list<GraphEdge>::const_iterator edge = edges_[vertex].begin();
+ for (; edge != edges_[vertex].end(); ++edge)
+ {
+ if (!was[edge->to])
+ {
+ body(*edge);
+ was[edge->to] = true;
+ vertices.push(edge->to);
+ }
+ }
+ }
+
+ return body;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+// Some auxiliary math functions
+
+static inline
+float normL2(const Point3f& a)
+{
+ return a.x * a.x + a.y * a.y + a.z * a.z;
+}
+
+
+static inline
+float normL2(const Point3f& a, const Point3f& b)
+{
+ return normL2(a - b);
+}
+
+
+static inline
+double normL2sq(const Mat &r)
+{
+ return r.dot(r);
+}
+
+
+static inline int sqr(int x) { return x * x; }
+static inline float sqr(float x) { return x * x; }
+static inline double sqr(double x) { return x * x; }
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_UTIL_INL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers.hpp
new file mode 100644
index 00000000..60d5e541
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers.hpp
@@ -0,0 +1,510 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_WARPERS_HPP__
+#define __OPENCV_STITCHING_WARPERS_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/gpumat.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+
+namespace cv {
+namespace detail {
+
+class CV_EXPORTS RotationWarper
+{
+public:
+ virtual ~RotationWarper() {}
+
+ virtual Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R) = 0;
+
+ virtual Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap) = 0;
+
+ virtual Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Mat &dst) = 0;
+
+ virtual void warpBackward(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Size dst_size, Mat &dst) = 0;
+
+ virtual Rect warpRoi(Size src_size, const Mat &K, const Mat &R) = 0;
+
+ float getScale() const { return 1.f; }
+ void setScale(float) {}
+};
+
+
+struct CV_EXPORTS ProjectorBase
+{
+ void setCameraParams(const Mat &K = Mat::eye(3, 3, CV_32F),
+ const Mat &R = Mat::eye(3, 3, CV_32F),
+ const Mat &T = Mat::zeros(3, 1, CV_32F));
+
+ float scale;
+ float k[9];
+ float rinv[9];
+ float r_kinv[9];
+ float k_rinv[9];
+ float t[3];
+};
+
+
+template <class P>
+class CV_EXPORTS RotationWarperBase : public RotationWarper
+{
+public:
+ Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R);
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap);
+
+ Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Mat &dst);
+
+ void warpBackward(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Size dst_size, Mat &dst);
+
+ Rect warpRoi(Size src_size, const Mat &K, const Mat &R);
+
+ float getScale() const { return projector_.scale; }
+ void setScale(float val) { projector_.scale = val; }
+
+protected:
+
+ // Detects ROI of the destination image. It's correct for any projection.
+ virtual void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);
+
+ // Detects ROI of the destination image by walking over image border.
+ // Correctness for any projection isn't guaranteed.
+ void detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br);
+
+ P projector_;
+};
+
+
+struct CV_EXPORTS PlaneProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS PlaneWarper : public RotationWarperBase<PlaneProjector>
+{
+public:
+ PlaneWarper(float scale = 1.f) { projector_.scale = scale; }
+
+ void setScale(float scale) { projector_.scale = scale; }
+
+ Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R, const Mat &T);
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, const Mat &T, Mat &xmap, Mat &ymap);
+
+ Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, int interp_mode, int border_mode,
+ Mat &dst);
+
+ Rect warpRoi(Size src_size, const Mat &K, const Mat &R, const Mat &T);
+
+protected:
+ void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);
+};
+
+
+struct CV_EXPORTS SphericalProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+// Projects image onto unit sphere with origin at (0, 0, 0).
+// Poles are located at (0, -1, 0) and (0, 1, 0) points.
+class CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector>
+{
+public:
+ SphericalWarper(float scale) { projector_.scale = scale; }
+
+protected:
+ void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);
+};
+
+
+struct CV_EXPORTS CylindricalProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+// Projects image onto x * x + z * z = 1 cylinder
+class CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjector>
+{
+public:
+ CylindricalWarper(float scale) { projector_.scale = scale; }
+
+protected:
+ void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
+ {
+ RotationWarperBase<CylindricalProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
+ }
+};
+
+
+struct CV_EXPORTS FisheyeProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS FisheyeWarper : public RotationWarperBase<FisheyeProjector>
+{
+public:
+ FisheyeWarper(float scale) { projector_.scale = scale; }
+};
+
+
+struct CV_EXPORTS StereographicProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS StereographicWarper : public RotationWarperBase<StereographicProjector>
+{
+public:
+ StereographicWarper(float scale) { projector_.scale = scale; }
+};
+
+
+struct CV_EXPORTS CompressedRectilinearProjector : ProjectorBase
+{
+ float a, b;
+
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS CompressedRectilinearWarper : public RotationWarperBase<CompressedRectilinearProjector>
+{
+public:
+ CompressedRectilinearWarper(float scale, float A = 1, float B = 1)
+ {
+ projector_.a = A;
+ projector_.b = B;
+ projector_.scale = scale;
+ }
+};
+
+
+struct CV_EXPORTS CompressedRectilinearPortraitProjector : ProjectorBase
+{
+ float a, b;
+
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS CompressedRectilinearPortraitWarper : public RotationWarperBase<CompressedRectilinearPortraitProjector>
+{
+public:
+ CompressedRectilinearPortraitWarper(float scale, float A = 1, float B = 1)
+ {
+ projector_.a = A;
+ projector_.b = B;
+ projector_.scale = scale;
+ }
+};
+
+
+struct CV_EXPORTS PaniniProjector : ProjectorBase
+{
+ float a, b;
+
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS PaniniWarper : public RotationWarperBase<PaniniProjector>
+{
+public:
+ PaniniWarper(float scale, float A = 1, float B = 1)
+ {
+ projector_.a = A;
+ projector_.b = B;
+ projector_.scale = scale;
+ }
+};
+
+
+struct CV_EXPORTS PaniniPortraitProjector : ProjectorBase
+{
+ float a, b;
+
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS PaniniPortraitWarper : public RotationWarperBase<PaniniPortraitProjector>
+{
+public:
+ PaniniPortraitWarper(float scale, float A = 1, float B = 1)
+ {
+ projector_.a = A;
+ projector_.b = B;
+ projector_.scale = scale;
+ }
+
+};
+
+
+struct CV_EXPORTS MercatorProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS MercatorWarper : public RotationWarperBase<MercatorProjector>
+{
+public:
+ MercatorWarper(float scale) { projector_.scale = scale; }
+};
+
+
+struct CV_EXPORTS TransverseMercatorProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS TransverseMercatorWarper : public RotationWarperBase<TransverseMercatorProjector>
+{
+public:
+ TransverseMercatorWarper(float scale) { projector_.scale = scale; }
+};
+
+
+class CV_EXPORTS PlaneWarperGpu : public PlaneWarper
+{
+public:
+ PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap)
+ {
+ Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
+ d_xmap_.download(xmap);
+ d_ymap_.download(ymap);
+ return result;
+ }
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, const Mat &T, Mat &xmap, Mat &ymap)
+ {
+ Rect result = buildMaps(src_size, K, R, T, d_xmap_, d_ymap_);
+ d_xmap_.download(xmap);
+ d_ymap_.download(ymap);
+ return result;
+ }
+
+ Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Mat &dst)
+ {
+ d_src_.upload(src);
+ Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
+ d_dst_.download(dst);
+ return result;
+ }
+
+ Point warp(const Mat &src, const Mat &K, const Mat &R, const Mat &T, int interp_mode, int border_mode,
+ Mat &dst)
+ {
+ d_src_.upload(src);
+ Point result = warp(d_src_, K, R, T, interp_mode, border_mode, d_dst_);
+ d_dst_.download(dst);
+ return result;
+ }
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, gpu::GpuMat &xmap, gpu::GpuMat &ymap);
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, const Mat &T, gpu::GpuMat &xmap, gpu::GpuMat &ymap);
+
+ Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ gpu::GpuMat &dst);
+
+ Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, const Mat &T, int interp_mode, int border_mode,
+ gpu::GpuMat &dst);
+
+private:
+ gpu::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
+};
+
+
+class CV_EXPORTS SphericalWarperGpu : public SphericalWarper
+{
+public:
+ SphericalWarperGpu(float scale) : SphericalWarper(scale) {}
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap)
+ {
+ Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
+ d_xmap_.download(xmap);
+ d_ymap_.download(ymap);
+ return result;
+ }
+
+ Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Mat &dst)
+ {
+ d_src_.upload(src);
+ Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
+ d_dst_.download(dst);
+ return result;
+ }
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, gpu::GpuMat &xmap, gpu::GpuMat &ymap);
+
+ Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ gpu::GpuMat &dst);
+
+private:
+ gpu::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
+};
+
+
+class CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper
+{
+public:
+ CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {}
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap)
+ {
+ Rect result = buildMaps(src_size, K, R, d_xmap_, d_ymap_);
+ d_xmap_.download(xmap);
+ d_ymap_.download(ymap);
+ return result;
+ }
+
+ Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Mat &dst)
+ {
+ d_src_.upload(src);
+ Point result = warp(d_src_, K, R, interp_mode, border_mode, d_dst_);
+ d_dst_.download(dst);
+ return result;
+ }
+
+ Rect buildMaps(Size src_size, const Mat &K, const Mat &R, gpu::GpuMat &xmap, gpu::GpuMat &ymap);
+
+ Point warp(const gpu::GpuMat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ gpu::GpuMat &dst);
+
+private:
+ gpu::GpuMat d_xmap_, d_ymap_, d_src_, d_dst_;
+};
+
+
+struct SphericalPortraitProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+// Projects image onto unit sphere with origin at (0, 0, 0).
+// Poles are located NOT at (0, -1, 0) and (0, 1, 0) points, BUT at (1, 0, 0) and (-1, 0, 0) points.
+class CV_EXPORTS SphericalPortraitWarper : public RotationWarperBase<SphericalPortraitProjector>
+{
+public:
+ SphericalPortraitWarper(float scale) { projector_.scale = scale; }
+
+protected:
+ void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br);
+};
+
+struct CylindricalPortraitProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS CylindricalPortraitWarper : public RotationWarperBase<CylindricalPortraitProjector>
+{
+public:
+ CylindricalPortraitWarper(float scale) { projector_.scale = scale; }
+
+protected:
+ void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
+ {
+ RotationWarperBase<CylindricalPortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
+ }
+};
+
+struct PlanePortraitProjector : ProjectorBase
+{
+ void mapForward(float x, float y, float &u, float &v);
+ void mapBackward(float u, float v, float &x, float &y);
+};
+
+
+class CV_EXPORTS PlanePortraitWarper : public RotationWarperBase<PlanePortraitProjector>
+{
+public:
+ PlanePortraitWarper(float scale) { projector_.scale = scale; }
+
+protected:
+ void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
+ {
+ RotationWarperBase<PlanePortraitProjector>::detectResultRoiByBorder(src_size, dst_tl, dst_br);
+ }
+};
+
+} // namespace detail
+} // namespace cv
+
+#include "warpers_inl.hpp"
+
+#endif // __OPENCV_STITCHING_WARPERS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers_inl.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers_inl.hpp
new file mode 100644
index 00000000..b1121232
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/detail/warpers_inl.hpp
@@ -0,0 +1,765 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_WARPERS_INL_HPP__
+#define __OPENCV_STITCHING_WARPERS_INL_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "warpers.hpp" // Make your IDE see declarations
+
+namespace cv {
+namespace detail {
+
+template <class P>
+Point2f RotationWarperBase<P>::warpPoint(const Point2f &pt, const Mat &K, const Mat &R)
+{
+ projector_.setCameraParams(K, R);
+ Point2f uv;
+ projector_.mapForward(pt.x, pt.y, uv.x, uv.y);
+ return uv;
+}
+
+
+template <class P>
+Rect RotationWarperBase<P>::buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap)
+{
+ projector_.setCameraParams(K, R);
+
+ Point dst_tl, dst_br;
+ detectResultRoi(src_size, dst_tl, dst_br);
+
+ xmap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
+ ymap.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
+
+ float x, y;
+ for (int v = dst_tl.y; v <= dst_br.y; ++v)
+ {
+ for (int u = dst_tl.x; u <= dst_br.x; ++u)
+ {
+ projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
+ xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
+ ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
+ }
+ }
+
+ return Rect(dst_tl, dst_br);
+}
+
+
+template <class P>
+Point RotationWarperBase<P>::warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Mat &dst)
+{
+ Mat xmap, ymap;
+ Rect dst_roi = buildMaps(src.size(), K, R, xmap, ymap);
+
+ dst.create(dst_roi.height + 1, dst_roi.width + 1, src.type());
+ remap(src, dst, xmap, ymap, interp_mode, border_mode);
+
+ return dst_roi.tl();
+}
+
+
+template <class P>
+void RotationWarperBase<P>::warpBackward(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode,
+ Size dst_size, Mat &dst)
+{
+ projector_.setCameraParams(K, R);
+
+ Point src_tl, src_br;
+ detectResultRoi(dst_size, src_tl, src_br);
+ CV_Assert(src_br.x - src_tl.x + 1 == src.cols && src_br.y - src_tl.y + 1 == src.rows);
+
+ Mat xmap(dst_size, CV_32F);
+ Mat ymap(dst_size, CV_32F);
+
+ float u, v;
+ for (int y = 0; y < dst_size.height; ++y)
+ {
+ for (int x = 0; x < dst_size.width; ++x)
+ {
+ projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
+ xmap.at<float>(y, x) = u - src_tl.x;
+ ymap.at<float>(y, x) = v - src_tl.y;
+ }
+ }
+
+ dst.create(dst_size, src.type());
+ remap(src, dst, xmap, ymap, interp_mode, border_mode);
+}
+
+
+template <class P>
+Rect RotationWarperBase<P>::warpRoi(Size src_size, const Mat &K, const Mat &R)
+{
+ projector_.setCameraParams(K, R);
+
+ Point dst_tl, dst_br;
+ detectResultRoi(src_size, dst_tl, dst_br);
+
+ return Rect(dst_tl, Point(dst_br.x + 1, dst_br.y + 1));
+}
+
+
+template <class P>
+void RotationWarperBase<P>::detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br)
+{
+ float tl_uf = std::numeric_limits<float>::max();
+ float tl_vf = std::numeric_limits<float>::max();
+ float br_uf = -std::numeric_limits<float>::max();
+ float br_vf = -std::numeric_limits<float>::max();
+
+ float u, v;
+ for (int y = 0; y < src_size.height; ++y)
+ {
+ for (int x = 0; x < src_size.width; ++x)
+ {
+ projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
+ tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
+ br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
+ }
+ }
+
+ dst_tl.x = static_cast<int>(tl_uf);
+ dst_tl.y = static_cast<int>(tl_vf);
+ dst_br.x = static_cast<int>(br_uf);
+ dst_br.y = static_cast<int>(br_vf);
+}
+
+
+template <class P>
+void RotationWarperBase<P>::detectResultRoiByBorder(Size src_size, Point &dst_tl, Point &dst_br)
+{
+ float tl_uf = std::numeric_limits<float>::max();
+ float tl_vf = std::numeric_limits<float>::max();
+ float br_uf = -std::numeric_limits<float>::max();
+ float br_vf = -std::numeric_limits<float>::max();
+
+ float u, v;
+ for (float x = 0; x < src_size.width; ++x)
+ {
+ projector_.mapForward(static_cast<float>(x), 0, u, v);
+ tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
+ br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
+
+ projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size.height - 1), u, v);
+ tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
+ br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
+ }
+ for (int y = 0; y < src_size.height; ++y)
+ {
+ projector_.mapForward(0, static_cast<float>(y), u, v);
+ tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
+ br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
+
+ projector_.mapForward(static_cast<float>(src_size.width - 1), static_cast<float>(y), u, v);
+ tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
+ br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
+ }
+
+ dst_tl.x = static_cast<int>(tl_uf);
+ dst_tl.y = static_cast<int>(tl_vf);
+ dst_br.x = static_cast<int>(br_uf);
+ dst_br.y = static_cast<int>(br_vf);
+}
+
+
+inline
+void PlaneProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ x_ = t[0] + x_ / z_ * (1 - t[2]);
+ y_ = t[1] + y_ / z_ * (1 - t[2]);
+
+ u = scale * x_;
+ v = scale * y_;
+}
+
+
+inline
+void PlaneProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u = u / scale - t[0];
+ v = v / scale - t[1];
+
+ float z;
+ x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2] * (1 - t[2]);
+ y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5] * (1 - t[2]);
+ z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8] * (1 - t[2]);
+
+ x /= z;
+ y /= z;
+}
+
+
+inline
+void SphericalProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ u = scale * atan2f(x_, z_);
+ float w = y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_);
+ v = scale * (static_cast<float>(CV_PI) - acosf(w == w ? w : 0));
+}
+
+
+inline
+void SphericalProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float sinv = sinf(static_cast<float>(CV_PI) - v);
+ float x_ = sinv * sinf(u);
+ float y_ = cosf(static_cast<float>(CV_PI) - v);
+ float z_ = sinv * cosf(u);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+
+inline
+void CylindricalProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ u = scale * atan2f(x_, z_);
+ v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);
+}
+
+
+inline
+void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float x_ = sinf(u);
+ float y_ = v;
+ float z_ = cosf(u);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void FisheyeProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ u = scale * v_ * cosf(u_);
+ v = scale * v_ * sinf(u_);
+}
+
+inline
+void FisheyeProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float u_ = atan2f(v, u);
+ float v_ = sqrtf(u*u + v*v);
+
+ float sinv = sinf((float)CV_PI - v_);
+ float x_ = sinv * sinf(u_);
+ float y_ = cosf((float)CV_PI - v_);
+ float z_ = sinv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void StereographicProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = (float)CV_PI - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ float r = sinf(v_) / (1 - cosf(v_));
+
+ u = scale * r * cos(u_);
+ v = scale * r * sin(u_);
+}
+
+inline
+void StereographicProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float u_ = atan2f(v, u);
+ float r = sqrtf(u*u + v*v);
+ float v_ = 2 * atanf(1.f / r);
+
+ float sinv = sinf((float)CV_PI - v_);
+ float x_ = sinv * sinf(u_);
+ float y_ = cosf((float)CV_PI - v_);
+ float z_ = sinv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void CompressedRectilinearProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ u = scale * a * tanf(u_ / a);
+ v = scale * b * tanf(v_) / cosf(u_);
+}
+
+inline
+void CompressedRectilinearProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float aatg = a * atanf(u / a);
+ float u_ = aatg;
+ float v_ = atanf(v * cosf(aatg) / b);
+
+ float cosv = cosf(v_);
+ float x_ = cosv * sinf(u_);
+ float y_ = sinf(v_);
+ float z_ = cosv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void CompressedRectilinearPortraitProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ u = - scale * a * tanf(u_ / a);
+ v = scale * b * tanf(v_) / cosf(u_);
+}
+
+inline
+void CompressedRectilinearPortraitProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= - scale;
+ v /= scale;
+
+ float aatg = a * atanf(u / a);
+ float u_ = aatg;
+ float v_ = atanf(v * cosf( aatg ) / b);
+
+ float cosv = cosf(v_);
+ float y_ = cosv * sinf(u_);
+ float x_ = sinf(v_);
+ float z_ = cosv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void PaniniProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ float tg = a * tanf(u_ / a);
+ u = scale * tg;
+
+ float sinu = sinf(u_);
+ if ( fabs(sinu) < 1E-7 )
+ v = scale * b * tanf(v_);
+ else
+ v = scale * b * tg * tanf(v_) / sinu;
+}
+
+inline
+void PaniniProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float lamda = a * atanf(u / a);
+ float u_ = lamda;
+
+ float v_;
+ if ( fabs(lamda) > 1E-7)
+ v_ = atanf(v * sinf(lamda) / (b * a * tanf(lamda / a)));
+ else
+ v_ = atanf(v / b);
+
+ float cosv = cosf(v_);
+ float x_ = cosv * sinf(u_);
+ float y_ = sinf(v_);
+ float z_ = cosv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void PaniniPortraitProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float y_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float x_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ float tg = a * tanf(u_ / a);
+ u = - scale * tg;
+
+ float sinu = sinf( u_ );
+ if ( fabs(sinu) < 1E-7 )
+ v = scale * b * tanf(v_);
+ else
+ v = scale * b * tg * tanf(v_) / sinu;
+}
+
+inline
+void PaniniPortraitProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= - scale;
+ v /= scale;
+
+ float lamda = a * atanf(u / a);
+ float u_ = lamda;
+
+ float v_;
+ if ( fabs(lamda) > 1E-7)
+ v_ = atanf(v * sinf(lamda) / (b * a * tanf(lamda/a)));
+ else
+ v_ = atanf(v / b);
+
+ float cosv = cosf(v_);
+ float y_ = cosv * sinf(u_);
+ float x_ = sinf(v_);
+ float z_ = cosv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void MercatorProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ u = scale * u_;
+ v = scale * logf( tanf( (float)(CV_PI/4) + v_/2 ) );
+}
+
+inline
+void MercatorProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float v_ = atanf( sinhf(v) );
+ float u_ = u;
+
+ float cosv = cosf(v_);
+ float x_ = cosv * sinf(u_);
+ float y_ = sinf(v_);
+ float z_ = cosv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void TransverseMercatorProjector::mapForward(float x, float y, float &u, float &v)
+{
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float u_ = atan2f(x_, z_);
+ float v_ = asinf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_));
+
+ float B = cosf(v_) * sinf(u_);
+
+ u = scale / 2 * logf( (1+B) / (1-B) );
+ v = scale * atan2f(tanf(v_), cosf(u_));
+}
+
+inline
+void TransverseMercatorProjector::mapBackward(float u, float v, float &x, float &y)
+{
+ u /= scale;
+ v /= scale;
+
+ float v_ = asinf( sinf(v) / coshf(u) );
+ float u_ = atan2f( sinhf(u), cos(v) );
+
+ float cosv = cosf(v_);
+ float x_ = cosv * sinf(u_);
+ float y_ = sinf(v_);
+ float z_ = cosv * cosf(u_);
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void SphericalPortraitProjector::mapForward(float x, float y, float &u0, float &v0)
+{
+ float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float x_ = y0_;
+ float y_ = x0_;
+ float u, v;
+
+ u = scale * atan2f(x_, z_);
+ v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));
+
+ u0 = -u;//v;
+ v0 = v;//u;
+}
+
+
+inline
+void SphericalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
+{
+ float u, v;
+ u = -u0;//v0;
+ v = v0;//u0;
+
+ u /= scale;
+ v /= scale;
+
+ float sinv = sinf(static_cast<float>(CV_PI) - v);
+ float x0_ = sinv * sinf(u);
+ float y0_ = cosf(static_cast<float>(CV_PI) - v);
+ float z_ = sinv * cosf(u);
+
+ float x_ = y0_;
+ float y_ = x0_;
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void CylindricalPortraitProjector::mapForward(float x, float y, float &u0, float &v0)
+{
+ float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float x_ = y0_;
+ float y_ = x0_;
+ float u, v;
+
+ u = scale * atan2f(x_, z_);
+ v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);
+
+ u0 = -u;//v;
+ v0 = v;//u;
+}
+
+
+inline
+void CylindricalPortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
+{
+ float u, v;
+ u = -u0;//v0;
+ v = v0;//u0;
+
+ u /= scale;
+ v /= scale;
+
+ float x0_ = sinf(u);
+ float y0_ = v;
+ float z_ = cosf(u);
+
+ float x_ = y0_;
+ float y_ = x0_;
+
+ float z;
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;
+
+ if (z > 0) { x /= z; y /= z; }
+ else x = y = -1;
+}
+
+inline
+void PlanePortraitProjector::mapForward(float x, float y, float &u0, float &v0)
+{
+ float x0_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];
+ float y0_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];
+
+ float x_ = y0_;
+ float y_ = x0_;
+
+ x_ = t[0] + x_ / z_ * (1 - t[2]);
+ y_ = t[1] + y_ / z_ * (1 - t[2]);
+
+ float u,v;
+ u = scale * x_;
+ v = scale * y_;
+
+ u0 = -u;
+ v0 = v;
+}
+
+
+inline
+void PlanePortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
+{
+ float u, v;
+ u = -u0;
+ v = v0;
+
+ u = u / scale - t[0];
+ v = v / scale - t[1];
+
+ float z;
+ x = k_rinv[0] * v + k_rinv[1] * u + k_rinv[2] * (1 - t[2]);
+ y = k_rinv[3] * v + k_rinv[4] * u + k_rinv[5] * (1 - t[2]);
+ z = k_rinv[6] * v + k_rinv[7] * u + k_rinv[8] * (1 - t[2]);
+
+ x /= z;
+ y /= z;
+}
+
+
+} // namespace detail
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_WARPERS_INL_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/stitcher.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/stitcher.hpp
new file mode 100644
index 00000000..e274deb1
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/stitcher.hpp
@@ -0,0 +1,174 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_STITCHER_HPP__
+#define __OPENCV_STITCHING_STITCHER_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/stitching/warpers.hpp"
+#include "opencv2/stitching/detail/matchers.hpp"
+#include "opencv2/stitching/detail/motion_estimators.hpp"
+#include "opencv2/stitching/detail/exposure_compensate.hpp"
+#include "opencv2/stitching/detail/seam_finders.hpp"
+#include "opencv2/stitching/detail/blenders.hpp"
+#include "opencv2/stitching/detail/camera.hpp"
+
+namespace cv {
+
+class CV_EXPORTS Stitcher
+{
+public:
+ enum { ORIG_RESOL = -1 };
+ enum Status { OK, ERR_NEED_MORE_IMGS };
+
+ // Creates stitcher with default parameters
+ static Stitcher createDefault(bool try_use_gpu = false);
+
+ double registrationResol() const { return registr_resol_; }
+ void setRegistrationResol(double resol_mpx) { registr_resol_ = resol_mpx; }
+
+ double seamEstimationResol() const { return seam_est_resol_; }
+ void setSeamEstimationResol(double resol_mpx) { seam_est_resol_ = resol_mpx; }
+
+ double compositingResol() const { return compose_resol_; }
+ void setCompositingResol(double resol_mpx) { compose_resol_ = resol_mpx; }
+
+ double panoConfidenceThresh() const { return conf_thresh_; }
+ void setPanoConfidenceThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
+
+ bool waveCorrection() const { return do_wave_correct_; }
+ void setWaveCorrection(bool flag) { do_wave_correct_ = flag; }
+
+ detail::WaveCorrectKind waveCorrectKind() const { return wave_correct_kind_; }
+ void setWaveCorrectKind(detail::WaveCorrectKind kind) { wave_correct_kind_ = kind; }
+
+ Ptr<detail::FeaturesFinder> featuresFinder() { return features_finder_; }
+ const Ptr<detail::FeaturesFinder> featuresFinder() const { return features_finder_; }
+ void setFeaturesFinder(Ptr<detail::FeaturesFinder> features_finder)
+ { features_finder_ = features_finder; }
+
+ Ptr<detail::FeaturesMatcher> featuresMatcher() { return features_matcher_; }
+ const Ptr<detail::FeaturesMatcher> featuresMatcher() const { return features_matcher_; }
+ void setFeaturesMatcher(Ptr<detail::FeaturesMatcher> features_matcher)
+ { features_matcher_ = features_matcher; }
+
+ const cv::Mat& matchingMask() const { return matching_mask_; }
+ void setMatchingMask(const cv::Mat &mask)
+ {
+ CV_Assert(mask.type() == CV_8U && mask.cols == mask.rows);
+ matching_mask_ = mask.clone();
+ }
+
+ Ptr<detail::BundleAdjusterBase> bundleAdjuster() { return bundle_adjuster_; }
+ const Ptr<detail::BundleAdjusterBase> bundleAdjuster() const { return bundle_adjuster_; }
+ void setBundleAdjuster(Ptr<detail::BundleAdjusterBase> bundle_adjuster)
+ { bundle_adjuster_ = bundle_adjuster; }
+
+ Ptr<WarperCreator> warper() { return warper_; }
+ const Ptr<WarperCreator> warper() const { return warper_; }
+ void setWarper(Ptr<WarperCreator> creator) { warper_ = creator; }
+
+ Ptr<detail::ExposureCompensator> exposureCompensator() { return exposure_comp_; }
+ const Ptr<detail::ExposureCompensator> exposureCompensator() const { return exposure_comp_; }
+ void setExposureCompensator(Ptr<detail::ExposureCompensator> exposure_comp)
+ { exposure_comp_ = exposure_comp; }
+
+ Ptr<detail::SeamFinder> seamFinder() { return seam_finder_; }
+ const Ptr<detail::SeamFinder> seamFinder() const { return seam_finder_; }
+ void setSeamFinder(Ptr<detail::SeamFinder> seam_finder) { seam_finder_ = seam_finder; }
+
+ Ptr<detail::Blender> blender() { return blender_; }
+ const Ptr<detail::Blender> blender() const { return blender_; }
+ void setBlender(Ptr<detail::Blender> b) { blender_ = b; }
+
+ Status estimateTransform(InputArray images);
+ Status estimateTransform(InputArray images, const std::vector<std::vector<Rect> > &rois);
+
+ Status composePanorama(OutputArray pano);
+ Status composePanorama(InputArray images, OutputArray pano);
+
+ Status stitch(InputArray images, OutputArray pano);
+ Status stitch(InputArray images, const std::vector<std::vector<Rect> > &rois, OutputArray pano);
+
+ std::vector<int> component() const { return indices_; }
+ std::vector<detail::CameraParams> cameras() const { return cameras_; }
+ double workScale() const { return work_scale_; }
+
+private:
+ Stitcher() {}
+
+ Status matchImages();
+ void estimateCameraParams();
+
+ double registr_resol_;
+ double seam_est_resol_;
+ double compose_resol_;
+ double conf_thresh_;
+ Ptr<detail::FeaturesFinder> features_finder_;
+ Ptr<detail::FeaturesMatcher> features_matcher_;
+ cv::Mat matching_mask_;
+ Ptr<detail::BundleAdjusterBase> bundle_adjuster_;
+ bool do_wave_correct_;
+ detail::WaveCorrectKind wave_correct_kind_;
+ Ptr<WarperCreator> warper_;
+ Ptr<detail::ExposureCompensator> exposure_comp_;
+ Ptr<detail::SeamFinder> seam_finder_;
+ Ptr<detail::Blender> blender_;
+
+ std::vector<cv::Mat> imgs_;
+ std::vector<std::vector<cv::Rect> > rois_;
+ std::vector<cv::Size> full_img_sizes_;
+ std::vector<detail::ImageFeatures> features_;
+ std::vector<detail::MatchesInfo> pairwise_matches_;
+ std::vector<cv::Mat> seam_est_imgs_;
+ std::vector<int> indices_;
+ std::vector<detail::CameraParams> cameras_;
+ double work_scale_;
+ double seam_scale_;
+ double seam_work_aspect_;
+ double warped_image_scale_;
+};
+
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_STITCHER_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/warpers.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/warpers.hpp
new file mode 100644
index 00000000..11e012ff
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/stitching/warpers.hpp
@@ -0,0 +1,170 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_WARPER_CREATORS_HPP__
+#define __OPENCV_STITCHING_WARPER_CREATORS_HPP__
+
+#include "opencv2/stitching/detail/warpers.hpp"
+
+namespace cv {
+
+class WarperCreator
+{
+public:
+ virtual ~WarperCreator() {}
+ virtual Ptr<detail::RotationWarper> create(float scale) const = 0;
+};
+
+
+class PlaneWarper : public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::PlaneWarper(scale); }
+};
+
+
+class CylindricalWarper: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::CylindricalWarper(scale); }
+};
+
+
+class SphericalWarper: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::SphericalWarper(scale); }
+};
+
+class FisheyeWarper : public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::FisheyeWarper(scale); }
+};
+
+class StereographicWarper: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::StereographicWarper(scale); }
+};
+
+class CompressedRectilinearWarper: public WarperCreator
+{
+ float a, b;
+public:
+ CompressedRectilinearWarper(float A = 1, float B = 1)
+ {
+ a = A; b = B;
+ }
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::CompressedRectilinearWarper(scale, a, b); }
+};
+
+class CompressedRectilinearPortraitWarper: public WarperCreator
+{
+ float a, b;
+public:
+ CompressedRectilinearPortraitWarper(float A = 1, float B = 1)
+ {
+ a = A; b = B;
+ }
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::CompressedRectilinearPortraitWarper(scale, a, b); }
+};
+
+class PaniniWarper: public WarperCreator
+{
+ float a, b;
+public:
+ PaniniWarper(float A = 1, float B = 1)
+ {
+ a = A; b = B;
+ }
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::PaniniWarper(scale, a, b); }
+};
+
+class PaniniPortraitWarper: public WarperCreator
+{
+ float a, b;
+public:
+ PaniniPortraitWarper(float A = 1, float B = 1)
+ {
+ a = A; b = B;
+ }
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::PaniniPortraitWarper(scale, a, b); }
+};
+
+class MercatorWarper: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::MercatorWarper(scale); }
+};
+
+class TransverseMercatorWarper: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::TransverseMercatorWarper(scale); }
+};
+
+
+
+class PlaneWarperGpu: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::PlaneWarperGpu(scale); }
+};
+
+
+class CylindricalWarperGpu: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::CylindricalWarperGpu(scale); }
+};
+
+
+class SphericalWarperGpu: public WarperCreator
+{
+public:
+ Ptr<detail::RotationWarper> create(float scale) const { return new detail::SphericalWarperGpu(scale); }
+};
+
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_WARPER_CREATORS_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres.hpp
new file mode 100644
index 00000000..8c344ca4
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/superres/superres.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/optical_flow.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/optical_flow.hpp
new file mode 100644
index 00000000..d51ce793
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/optical_flow.hpp
@@ -0,0 +1,76 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__
+#define __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__
+
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+ namespace superres
+ {
+ class CV_EXPORTS DenseOpticalFlowExt : public cv::Algorithm
+ {
+ public:
+ virtual void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2 = noArray()) = 0;
+ virtual void collectGarbage() = 0;
+ };
+
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_GPU();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_OCL();
+
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
+
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_GPU();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_OCL();
+
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_GPU();
+
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_GPU();
+ CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_OCL();
+ }
+}
+
+#endif // __OPENCV_SUPERRES_OPTICAL_FLOW_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/superres.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/superres.hpp
new file mode 100644
index 00000000..8daeb5ba
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/superres/superres.hpp
@@ -0,0 +1,99 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_SUPERRES_HPP__
+#define __OPENCV_SUPERRES_HPP__
+
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+ namespace superres
+ {
+ CV_EXPORTS bool initModule_superres();
+
+ class CV_EXPORTS FrameSource
+ {
+ public:
+ virtual ~FrameSource();
+
+ virtual void nextFrame(OutputArray frame) = 0;
+ virtual void reset() = 0;
+ };
+
+ CV_EXPORTS Ptr<FrameSource> createFrameSource_Empty();
+
+ CV_EXPORTS Ptr<FrameSource> createFrameSource_Video(const std::string& fileName);
+ CV_EXPORTS Ptr<FrameSource> createFrameSource_Video_GPU(const std::string& fileName);
+
+ CV_EXPORTS Ptr<FrameSource> createFrameSource_Camera(int deviceId = 0);
+
+ class CV_EXPORTS SuperResolution : public cv::Algorithm, public FrameSource
+ {
+ public:
+ void setInput(const Ptr<FrameSource>& frameSource);
+
+ void nextFrame(OutputArray frame);
+ void reset();
+
+ virtual void collectGarbage();
+
+ protected:
+ SuperResolution();
+
+ virtual void initImpl(Ptr<FrameSource>& frameSource) = 0;
+ virtual void processImpl(Ptr<FrameSource>& frameSource, OutputArray output) = 0;
+
+ private:
+ Ptr<FrameSource> frameSource_;
+ bool firstCall_;
+ };
+
+ // S. Farsiu , D. Robinson, M. Elad, P. Milanfar. Fast and robust multiframe super resolution.
+ // Dennis Mitzel, Thomas Pock, Thomas Schoenemann, Daniel Cremers. Video Super Resolution using Duality Based TV-L1 Optical Flow.
+ CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();
+ CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_GPU();
+ CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_OCL();
+ }
+}
+
+#endif // __OPENCV_SUPERRES_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts.hpp
new file mode 100644
index 00000000..e96b101e
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/ts/ts.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_perf.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_perf.hpp
new file mode 100644
index 00000000..87f1737c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_perf.hpp
@@ -0,0 +1,115 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_PERF_UTILITY_HPP__
+#define __OPENCV_GPU_PERF_UTILITY_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/ts/ts_perf.hpp"
+
+namespace perf
+{
+#ifdef OPENCV_TINY_GPU_MODULE
+ #define ALL_BORDER_MODES testing::Values(BorderMode(cv::BORDER_REFLECT101), BorderMode(cv::BORDER_REPLICATE), BorderMode(cv::BORDER_CONSTANT), BorderMode(cv::BORDER_REFLECT))
+ #define ALL_INTERPOLATIONS testing::Values(Interpolation(cv::INTER_NEAREST), Interpolation(cv::INTER_LINEAR), Interpolation(cv::INTER_AREA))
+#else
+ #define ALL_BORDER_MODES BorderMode::all()
+ #define ALL_INTERPOLATIONS Interpolation::all()
+#endif
+
+ CV_ENUM(BorderMode, BORDER_REFLECT101, BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT, BORDER_WRAP)
+ CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA)
+ CV_ENUM(NormType, NORM_INF, NORM_L1, NORM_L2, NORM_HAMMING, NORM_MINMAX)
+
+ enum { Gray = 1, TwoChannel = 2, BGR = 3, BGRA = 4 };
+ CV_ENUM(MatCn, Gray, TwoChannel, BGR, BGRA)
+
+ #define GPU_CHANNELS_1_3_4 testing::Values(MatCn(Gray), MatCn(BGR), MatCn(BGRA))
+ #define GPU_CHANNELS_1_3 testing::Values(MatCn(Gray), MatCn(BGR))
+
+ #define GET_PARAM(k) std::tr1::get< k >(GetParam())
+
+ #define DEF_PARAM_TEST(name, ...) typedef ::perf::TestBaseWithParam< std::tr1::tuple< __VA_ARGS__ > > name
+ #define DEF_PARAM_TEST_1(name, param_type) typedef ::perf::TestBaseWithParam< param_type > name
+
+ DEF_PARAM_TEST_1(Sz, cv::Size);
+ typedef ::perf::Size_MatType Sz_Type;
+ DEF_PARAM_TEST(Sz_Depth, cv::Size, ::perf::MatDepth);
+ DEF_PARAM_TEST(Sz_Depth_Cn, cv::Size, ::perf::MatDepth, MatCn);
+
+ #define GPU_TYPICAL_MAT_SIZES testing::Values(perf::sz720p, perf::szSXGA, perf::sz1080p)
+
+ #define FAIL_NO_CPU() FAIL() << "No such CPU implementation analogy"
+
+ #define GPU_SANITY_CHECK(mat, ...) \
+ do{ \
+ cv::Mat gpu_##mat(mat); \
+ SANITY_CHECK(gpu_##mat, ## __VA_ARGS__); \
+ } while(0)
+
+ #define CPU_SANITY_CHECK(mat, ...) \
+ do{ \
+ cv::Mat cpu_##mat(mat); \
+ SANITY_CHECK(cpu_##mat, ## __VA_ARGS__); \
+ } while(0)
+
+ CV_EXPORTS cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
+
+ struct CvtColorInfo
+ {
+ int scn;
+ int dcn;
+ int code;
+
+ CvtColorInfo() {}
+ explicit CvtColorInfo(int scn_, int dcn_, int code_) : scn(scn_), dcn(dcn_), code(code_) {}
+ };
+ CV_EXPORTS void PrintTo(const CvtColorInfo& info, std::ostream* os);
+
+ CV_EXPORTS void printCudaInfo();
+
+ CV_EXPORTS void sortKeyPoints(std::vector<cv::KeyPoint>& keypoints, cv::InputOutputArray _descriptors = cv::noArray());
+}
+
+#endif // __OPENCV_GPU_PERF_UTILITY_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_test.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_test.hpp
new file mode 100644
index 00000000..01737bc9
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/gpu_test.hpp
@@ -0,0 +1,360 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GPU_TEST_UTILITY_HPP__
+#define __OPENCV_GPU_TEST_UTILITY_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/gpumat.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/ts/ts.hpp"
+#include "opencv2/ts/ts_perf.hpp"
+
+namespace cvtest
+{
+ //////////////////////////////////////////////////////////////////////
+ // random generators
+
+ CV_EXPORTS int randomInt(int minVal, int maxVal);
+ CV_EXPORTS double randomDouble(double minVal, double maxVal);
+ CV_EXPORTS cv::Size randomSize(int minVal, int maxVal);
+ CV_EXPORTS cv::Scalar randomScalar(double minVal, double maxVal);
+ CV_EXPORTS cv::Mat randomMat(cv::Size size, int type, double minVal = 0.0, double maxVal = 255.0);
+
+ //////////////////////////////////////////////////////////////////////
+ // GpuMat create
+
+ CV_EXPORTS cv::gpu::GpuMat createMat(cv::Size size, int type, bool useRoi = false);
+ CV_EXPORTS cv::gpu::GpuMat loadMat(const cv::Mat& m, bool useRoi = false);
+
+ //////////////////////////////////////////////////////////////////////
+ // Image load
+
+ //! read image from testdata folder
+ CV_EXPORTS cv::Mat readImage(const std::string& fileName, int flags = cv::IMREAD_COLOR);
+
+ //! read image from testdata folder and convert it to specified type
+ CV_EXPORTS cv::Mat readImageType(const std::string& fname, int type);
+
+ //////////////////////////////////////////////////////////////////////
+ // Gpu devices
+
+ //! return true if device supports specified feature and gpu module was built with support the feature.
+ CV_EXPORTS bool supportFeature(const cv::gpu::DeviceInfo& info, cv::gpu::FeatureSet feature);
+
+ class CV_EXPORTS DeviceManager
+ {
+ public:
+ static DeviceManager& instance();
+
+ void load(int i);
+ void loadAll();
+
+ const std::vector<cv::gpu::DeviceInfo>& values() const { return devices_; }
+
+ private:
+ std::vector<cv::gpu::DeviceInfo> devices_;
+ };
+
+ #define ALL_DEVICES testing::ValuesIn(cvtest::DeviceManager::instance().values())
+
+ //////////////////////////////////////////////////////////////////////
+ // Additional assertion
+
+ CV_EXPORTS void minMaxLocGold(const cv::Mat& src, double* minVal_, double* maxVal_ = 0, cv::Point* minLoc_ = 0, cv::Point* maxLoc_ = 0, const cv::Mat& mask = cv::Mat());
+
+ CV_EXPORTS cv::Mat getMat(cv::InputArray arr);
+
+ CV_EXPORTS testing::AssertionResult assertMatNear(const char* expr1, const char* expr2, const char* eps_expr, cv::InputArray m1, cv::InputArray m2, double eps);
+
+ #define EXPECT_MAT_NEAR(m1, m2, eps) EXPECT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
+ #define ASSERT_MAT_NEAR(m1, m2, eps) ASSERT_PRED_FORMAT3(cvtest::assertMatNear, m1, m2, eps)
+
+ #define EXPECT_SCALAR_NEAR(s1, s2, eps) \
+ { \
+ EXPECT_NEAR(s1[0], s2[0], eps); \
+ EXPECT_NEAR(s1[1], s2[1], eps); \
+ EXPECT_NEAR(s1[2], s2[2], eps); \
+ EXPECT_NEAR(s1[3], s2[3], eps); \
+ }
+ #define ASSERT_SCALAR_NEAR(s1, s2, eps) \
+ { \
+ ASSERT_NEAR(s1[0], s2[0], eps); \
+ ASSERT_NEAR(s1[1], s2[1], eps); \
+ ASSERT_NEAR(s1[2], s2[2], eps); \
+ ASSERT_NEAR(s1[3], s2[3], eps); \
+ }
+
+ #define EXPECT_POINT2_NEAR(p1, p2, eps) \
+ { \
+ EXPECT_NEAR(p1.x, p2.x, eps); \
+ EXPECT_NEAR(p1.y, p2.y, eps); \
+ }
+ #define ASSERT_POINT2_NEAR(p1, p2, eps) \
+ { \
+ ASSERT_NEAR(p1.x, p2.x, eps); \
+ ASSERT_NEAR(p1.y, p2.y, eps); \
+ }
+
+ #define EXPECT_POINT3_NEAR(p1, p2, eps) \
+ { \
+ EXPECT_NEAR(p1.x, p2.x, eps); \
+ EXPECT_NEAR(p1.y, p2.y, eps); \
+ EXPECT_NEAR(p1.z, p2.z, eps); \
+ }
+ #define ASSERT_POINT3_NEAR(p1, p2, eps) \
+ { \
+ ASSERT_NEAR(p1.x, p2.x, eps); \
+ ASSERT_NEAR(p1.y, p2.y, eps); \
+ ASSERT_NEAR(p1.z, p2.z, eps); \
+ }
+
+ CV_EXPORTS double checkSimilarity(cv::InputArray m1, cv::InputArray m2);
+
+ #define EXPECT_MAT_SIMILAR(mat1, mat2, eps) \
+ { \
+ ASSERT_EQ(mat1.type(), mat2.type()); \
+ ASSERT_EQ(mat1.size(), mat2.size()); \
+ EXPECT_LE(checkSimilarity(mat1, mat2), eps); \
+ }
+ #define ASSERT_MAT_SIMILAR(mat1, mat2, eps) \
+ { \
+ ASSERT_EQ(mat1.type(), mat2.type()); \
+ ASSERT_EQ(mat1.size(), mat2.size()); \
+ ASSERT_LE(checkSimilarity(mat1, mat2), eps); \
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ // Helper structs for value-parameterized tests
+
+ #define GPU_TEST_P(test_case_name, test_name) \
+ class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ : public test_case_name { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+ virtual void TestBody(); \
+ private: \
+ void UnsafeTestBody(); \
+ static int AddToRegistry() { \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
+ #test_case_name, \
+ #test_name, \
+ new ::testing::internal::TestMetaFactory< \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_case_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() \
+ { \
+ try \
+ { \
+ UnsafeTestBody(); \
+ } \
+ catch (...) \
+ { \
+ cv::gpu::resetDevice(); \
+ throw; \
+ } \
+ } \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::UnsafeTestBody()
+
+ #define PARAM_TEST_CASE(name, ...) struct name : testing::TestWithParam< std::tr1::tuple< __VA_ARGS__ > >
+ #define GET_PARAM(k) std::tr1::get< k >(GetParam())
+
+ #define DIFFERENT_SIZES testing::Values(cv::Size(128, 128), cv::Size(113, 113))
+
+ // Depth
+
+ using perf::MatDepth;
+
+#ifdef OPENCV_TINY_GPU_MODULE
+ #define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_32F))
+
+ #define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
+ std::make_pair(MatDepth(CV_32F), MatDepth(CV_32F)))
+#else
+ #define ALL_DEPTH testing::Values(MatDepth(CV_8U), MatDepth(CV_8S), MatDepth(CV_16U), MatDepth(CV_16S), MatDepth(CV_32S), MatDepth(CV_32F), MatDepth(CV_64F))
+
+ #define DEPTH_PAIRS testing::Values(std::make_pair(MatDepth(CV_8U), MatDepth(CV_8U)), \
+ std::make_pair(MatDepth(CV_8U), MatDepth(CV_16U)), \
+ std::make_pair(MatDepth(CV_8U), MatDepth(CV_16S)), \
+ std::make_pair(MatDepth(CV_8U), MatDepth(CV_32S)), \
+ std::make_pair(MatDepth(CV_8U), MatDepth(CV_32F)), \
+ std::make_pair(MatDepth(CV_8U), MatDepth(CV_64F)), \
+ \
+ std::make_pair(MatDepth(CV_16U), MatDepth(CV_16U)), \
+ std::make_pair(MatDepth(CV_16U), MatDepth(CV_32S)), \
+ std::make_pair(MatDepth(CV_16U), MatDepth(CV_32F)), \
+ std::make_pair(MatDepth(CV_16U), MatDepth(CV_64F)), \
+ \
+ std::make_pair(MatDepth(CV_16S), MatDepth(CV_16S)), \
+ std::make_pair(MatDepth(CV_16S), MatDepth(CV_32S)), \
+ std::make_pair(MatDepth(CV_16S), MatDepth(CV_32F)), \
+ std::make_pair(MatDepth(CV_16S), MatDepth(CV_64F)), \
+ \
+ std::make_pair(MatDepth(CV_32S), MatDepth(CV_32S)), \
+ std::make_pair(MatDepth(CV_32S), MatDepth(CV_32F)), \
+ std::make_pair(MatDepth(CV_32S), MatDepth(CV_64F)), \
+ \
+ std::make_pair(MatDepth(CV_32F), MatDepth(CV_32F)), \
+ std::make_pair(MatDepth(CV_32F), MatDepth(CV_64F)), \
+ \
+ std::make_pair(MatDepth(CV_64F), MatDepth(CV_64F)))
+#endif
+
+ // Type
+
+ using perf::MatType;
+
+ //! return vector with types from specified range.
+ CV_EXPORTS std::vector<MatType> types(int depth_start, int depth_end, int cn_start, int cn_end);
+
+ //! return vector with all types (depth: CV_8U-CV_64F, channels: 1-4).
+ CV_EXPORTS const std::vector<MatType>& all_types();
+
+ #define ALL_TYPES testing::ValuesIn(all_types())
+ #define TYPES(depth_start, depth_end, cn_start, cn_end) testing::ValuesIn(types(depth_start, depth_end, cn_start, cn_end))
+
+ // ROI
+
+ class UseRoi
+ {
+ public:
+ inline UseRoi(bool val = false) : val_(val) {}
+
+ inline operator bool() const { return val_; }
+
+ private:
+ bool val_;
+ };
+
+ CV_EXPORTS void PrintTo(const UseRoi& useRoi, std::ostream* os);
+
+ #define WHOLE_SUBMAT testing::Values(UseRoi(false), UseRoi(true))
+
+ // Direct/Inverse
+
+ class Inverse
+ {
+ public:
+ inline Inverse(bool val = false) : val_(val) {}
+
+ inline operator bool() const { return val_; }
+
+ private:
+ bool val_;
+ };
+
+ CV_EXPORTS void PrintTo(const Inverse& useRoi, std::ostream* os);
+
+ #define DIRECT_INVERSE testing::Values(Inverse(false), Inverse(true))
+
+ // Param class
+
+ #define IMPLEMENT_PARAM_CLASS(name, type) \
+ class name \
+ { \
+ public: \
+ name ( type arg = type ()) : val_(arg) {} \
+ operator type () const {return val_;} \
+ private: \
+ type val_; \
+ }; \
+ inline void PrintTo( name param, std::ostream* os) \
+ { \
+ *os << #name << "(" << testing::PrintToString(static_cast< type >(param)) << ")"; \
+ }
+
+ IMPLEMENT_PARAM_CLASS(Channels, int)
+
+ #define ALL_CHANNELS testing::Values(Channels(1), Channels(2), Channels(3), Channels(4))
+ #define IMAGE_CHANNELS testing::Values(Channels(1), Channels(3), Channels(4))
+
+ // Flags and enums
+
+ CV_ENUM(NormCode, NORM_INF, NORM_L1, NORM_L2, NORM_TYPE_MASK, NORM_RELATIVE, NORM_MINMAX)
+
+ CV_ENUM(Interpolation, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA)
+
+ CV_ENUM(BorderType, BORDER_REFLECT101, BORDER_REPLICATE, BORDER_CONSTANT, BORDER_REFLECT, BORDER_WRAP)
+#ifdef OPENCV_TINY_GPU_MODULE
+ #define ALL_BORDER_TYPES testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_CONSTANT), BorderType(cv::BORDER_REFLECT))
+#else
+ #define ALL_BORDER_TYPES testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_CONSTANT), BorderType(cv::BORDER_REFLECT), BorderType(cv::BORDER_WRAP))
+#endif
+
+ CV_FLAGS(WarpFlags, INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, WARP_INVERSE_MAP)
+
+ //////////////////////////////////////////////////////////////////////
+ // Features2D
+
+ CV_EXPORTS testing::AssertionResult assertKeyPointsEquals(const char* gold_expr, const char* actual_expr, std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
+
+ #define ASSERT_KEYPOINTS_EQ(gold, actual) EXPECT_PRED_FORMAT2(assertKeyPointsEquals, gold, actual)
+
+ CV_EXPORTS int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual);
+ CV_EXPORTS int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches);
+
+ //////////////////////////////////////////////////////////////////////
+ // Other
+
+ CV_EXPORTS void dumpImage(const std::string& fileName, const cv::Mat& image);
+ CV_EXPORTS void showDiff(cv::InputArray gold, cv::InputArray actual, double eps);
+
+ CV_EXPORTS void printCudaInfo();
+}
+
+namespace cv { namespace gpu
+{
+ CV_EXPORTS void PrintTo(const DeviceInfo& info, std::ostream* os);
+}}
+
+#endif // __OPENCV_GPU_TEST_UTILITY_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts.hpp
new file mode 100644
index 00000000..d3c17657
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts.hpp
@@ -0,0 +1,638 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_GTESTCV_HPP__
+#define __OPENCV_GTESTCV_HPP__
+
+#include <stdarg.h> // for va_list
+
+#ifdef HAVE_WINRT
+ #pragma warning(disable:4447) // Disable warning 'main' signature found without threading model
+#endif
+
+#ifdef _MSC_VER
+#pragma warning( disable: 4127 )
+#endif
+
+#define GTEST_DONT_DEFINE_FAIL 0
+#define GTEST_DONT_DEFINE_SUCCEED 0
+#define GTEST_DONT_DEFINE_ASSERT_EQ 0
+#define GTEST_DONT_DEFINE_ASSERT_NE 0
+#define GTEST_DONT_DEFINE_ASSERT_LE 0
+#define GTEST_DONT_DEFINE_ASSERT_LT 0
+#define GTEST_DONT_DEFINE_ASSERT_GE 0
+#define GTEST_DONT_DEFINE_ASSERT_GT 0
+#define GTEST_DONT_DEFINE_TEST 0
+
+#include "opencv2/ts/ts_gtest.h"
+
+#ifndef GTEST_USES_SIMPLE_RE
+# define GTEST_USES_SIMPLE_RE 0
+#endif
+#ifndef GTEST_USES_POSIX_RE
+# define GTEST_USES_POSIX_RE 0
+#endif
+
+#include "opencv2/core/core.hpp"
+
+namespace cvtest
+{
+
+using std::vector;
+using std::string;
+using cv::RNG;
+using cv::Mat;
+using cv::Scalar;
+using cv::Size;
+using cv::Point;
+using cv::Rect;
+
+class CV_EXPORTS TS;
+
+CV_EXPORTS int64 readSeed(const char* str);
+
+CV_EXPORTS void randUni( RNG& rng, Mat& a, const Scalar& param1, const Scalar& param2 );
+
+inline unsigned randInt( RNG& rng )
+{
+ return (unsigned)rng;
+}
+
+inline double randReal( RNG& rng )
+{
+ return (double)rng;
+}
+
+
+CV_EXPORTS const char* getTypeName( int type );
+CV_EXPORTS int typeByName( const char* type_name );
+
+CV_EXPORTS string vec2str(const string& sep, const int* v, size_t nelems);
+
+inline int clipInt( int val, int min_val, int max_val )
+{
+ if( val < min_val )
+ val = min_val;
+ if( val > max_val )
+ val = max_val;
+ return val;
+}
+
+CV_EXPORTS double getMinVal(int depth);
+CV_EXPORTS double getMaxVal(int depth);
+
+CV_EXPORTS Size randomSize(RNG& rng, double maxSizeLog);
+CV_EXPORTS void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz);
+CV_EXPORTS int randomType(RNG& rng, int typeMask, int minChannels, int maxChannels);
+CV_EXPORTS Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi);
+CV_EXPORTS Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi);
+CV_EXPORTS void add(const Mat& a, double alpha, const Mat& b, double beta,
+ Scalar gamma, Mat& c, int ctype, bool calcAbs=false);
+CV_EXPORTS void multiply(const Mat& a, const Mat& b, Mat& c, double alpha=1);
+CV_EXPORTS void divide(const Mat& a, const Mat& b, Mat& c, double alpha=1);
+
+CV_EXPORTS void convert(const Mat& src, Mat& dst, int dtype, double alpha=1, double beta=0);
+CV_EXPORTS void copy(const Mat& src, Mat& dst, const Mat& mask=Mat(), bool invertMask=false);
+CV_EXPORTS void set(Mat& dst, const Scalar& gamma, const Mat& mask=Mat());
+
+// working with multi-channel arrays
+CV_EXPORTS void extract( const Mat& a, Mat& plane, int coi );
+CV_EXPORTS void insert( const Mat& plane, Mat& a, int coi );
+
+// checks that the array does not have NaNs and/or Infs and all the elements are
+// within [min_val,max_val). idx is the index of the first "bad" element.
+CV_EXPORTS int check( const Mat& data, double min_val, double max_val, vector<int>* idx );
+
+// modifies values that are close to zero
+CV_EXPORTS void patchZeros( Mat& mat, double level );
+
+CV_EXPORTS void transpose(const Mat& src, Mat& dst);
+CV_EXPORTS void erode(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
+ int borderType=IPL_BORDER_CONSTANT, const Scalar& borderValue=Scalar());
+CV_EXPORTS void dilate(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
+ int borderType=IPL_BORDER_CONSTANT, const Scalar& borderValue=Scalar());
+CV_EXPORTS void filter2D(const Mat& src, Mat& dst, int ddepth, const Mat& kernel,
+ Point anchor, double delta, int borderType,
+ const Scalar& borderValue=Scalar());
+CV_EXPORTS void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
+ int borderType, const Scalar& borderValue=Scalar());
+CV_EXPORTS Mat calcSobelKernel2D( int dx, int dy, int apertureSize, int origin=0 );
+CV_EXPORTS Mat calcLaplaceKernel2D( int aperture_size );
+
+CV_EXPORTS void initUndistortMap( const Mat& a, const Mat& k, Size sz, Mat& mapx, Mat& mapy );
+
+CV_EXPORTS void minMaxLoc(const Mat& src, double* minval, double* maxval,
+ vector<int>* minloc, vector<int>* maxloc, const Mat& mask=Mat());
+CV_EXPORTS double norm(const Mat& src, int normType, const Mat& mask=Mat());
+CV_EXPORTS double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask=Mat());
+CV_EXPORTS Scalar mean(const Mat& src, const Mat& mask=Mat());
+
+CV_EXPORTS bool cmpUlps(const Mat& data, const Mat& refdata, int expMaxDiff, double* realMaxDiff, vector<int>* idx);
+
+// compares two arrays. max_diff is the maximum actual difference,
+// success_err_level is maximum allowed difference, idx is the index of the first
+// element for which difference is >success_err_level
+// (or index of element with the maximum difference)
+CV_EXPORTS int cmpEps( const Mat& data, const Mat& refdata, double* max_diff,
+ double success_err_level, vector<int>* idx,
+ bool element_wise_relative_error );
+
+// a wrapper for the previous function. in case of error prints the message to log file.
+CV_EXPORTS int cmpEps2( TS* ts, const Mat& data, const Mat& refdata, double success_err_level,
+ bool element_wise_relative_error, const char* desc );
+
+CV_EXPORTS int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
+ double eps, const char* param_name );
+
+CV_EXPORTS void logicOp(const Mat& src1, const Mat& src2, Mat& dst, char c);
+CV_EXPORTS void logicOp(const Mat& src, const Scalar& s, Mat& dst, char c);
+CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst);
+CV_EXPORTS void min(const Mat& src, double s, Mat& dst);
+CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst);
+CV_EXPORTS void max(const Mat& src, double s, Mat& dst);
+
+CV_EXPORTS void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop);
+CV_EXPORTS void compare(const Mat& src, double s, Mat& dst, int cmpop);
+CV_EXPORTS void gemm(const Mat& src1, const Mat& src2, double alpha,
+ const Mat& src3, double beta, Mat& dst, int flags);
+ CV_EXPORTS void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& shift );
+CV_EXPORTS double crossCorr(const Mat& src1, const Mat& src2);
+
+struct CV_EXPORTS MatInfo
+{
+ MatInfo(const Mat& _m) : m(&_m) {}
+ const Mat* m;
+};
+
+CV_EXPORTS std::ostream& operator << (std::ostream& out, const MatInfo& m);
+
+struct CV_EXPORTS MatComparator
+{
+public:
+ MatComparator(double maxdiff, int context);
+
+ ::testing::AssertionResult operator()(const char* expr1, const char* expr2,
+ const Mat& m1, const Mat& m2);
+
+ double maxdiff;
+ double realmaxdiff;
+ vector<int> loc0;
+ int context;
+};
+
+
+
+class BaseTest;
+class TS;
+
+class CV_EXPORTS BaseTest
+{
+public:
+ // constructor(s) and destructor
+ BaseTest();
+ virtual ~BaseTest();
+
+ // the main procedure of the test
+ virtual void run( int start_from );
+
+ // the wrapper for run that cares of exceptions
+ virtual void safe_run( int start_from=0 );
+
+ const string& get_name() const { return name; }
+
+ // returns true if and only if the different test cases do not depend on each other
+ // (so that test system could get right to a problematic test case)
+ virtual bool can_do_fast_forward();
+
+ // deallocates all the memory.
+ // called by init() (before initialization) and by the destructor
+ virtual void clear();
+
+protected:
+ int test_case_count; // the total number of test cases
+
+ // read test params
+ virtual int read_params( CvFileStorage* fs );
+
+ // returns the number of tests or -1 if it is unknown a-priori
+ virtual int get_test_case_count();
+
+ // prepares data for the next test case. rng seed is updated by the function
+ virtual int prepare_test_case( int test_case_idx );
+
+ // checks if the test output is valid and accurate
+ virtual int validate_test_results( int test_case_idx );
+
+ // calls the tested function. the method is called from run_test_case()
+ virtual void run_func(); // runs tested func(s)
+
+ // updates progress bar
+ virtual int update_progress( int progress, int test_case_idx, int count, double dt );
+
+ // finds test parameter
+ const CvFileNode* find_param( CvFileStorage* fs, const char* param_name );
+
+ // name of the test (it is possible to locate a test by its name)
+ string name;
+
+ // pointer to the system that includes the test
+ TS* ts;
+};
+
+
+/*****************************************************************************************\
+* Information about a failed test *
+\*****************************************************************************************/
+
+struct TestInfo
+{
+ TestInfo();
+
+ // pointer to the test
+ BaseTest* test;
+
+ // failure code (CV_FAIL*)
+ int code;
+
+ // seed value right before the data for the failed test case is prepared.
+ uint64 rng_seed;
+
+ // seed value right before running the test
+ uint64 rng_seed0;
+
+ // index of test case, can be then passed to BaseTest::proceed_to_test_case()
+ int test_case_idx;
+};
+
+/*****************************************************************************************\
+* Base Class for test system *
+\*****************************************************************************************/
+
+// common parameters:
+struct CV_EXPORTS TSParams
+{
+ TSParams();
+
+ // RNG seed, passed to and updated by every test executed.
+ uint64 rng_seed;
+
+ // whether to use IPP, MKL etc. or not
+ bool use_optimized;
+
+ // extensivity of the tests, scale factor for test_case_count
+ double test_case_count_scale;
+};
+
+
+class CV_EXPORTS TS
+{
+public:
+ // constructor(s) and destructor
+ TS();
+ virtual ~TS();
+
+ enum
+ {
+ NUL=0,
+ SUMMARY_IDX=0,
+ SUMMARY=1 << SUMMARY_IDX,
+ LOG_IDX=1,
+ LOG=1 << LOG_IDX,
+ CSV_IDX=2,
+ CSV=1 << CSV_IDX,
+ CONSOLE_IDX=3,
+ CONSOLE=1 << CONSOLE_IDX,
+ MAX_IDX=4
+ };
+
+ static TS* ptr();
+
+ // initialize test system before running the first test
+ virtual void init( const string& modulename );
+
+ // low-level printing functions that are used by individual tests and by the system itself
+ virtual void printf( int streams, const char* fmt, ... );
+ virtual void vprintf( int streams, const char* fmt, va_list arglist );
+
+ // updates the context: current test, test case, rng state
+ virtual void update_context( BaseTest* test, int test_case_idx, bool update_ts_context );
+
+ const TestInfo* get_current_test_info() { return &current_test_info; }
+
+ // sets information about a failed test
+ virtual void set_failed_test_info( int fail_code );
+
+ virtual void set_gtest_status();
+
+ // test error codes
+ enum
+ {
+ // everything is Ok
+ OK=0,
+
+ // generic error: stub value to be used
+ // temporarily if the error's cause is unknown
+ FAIL_GENERIC=-1,
+
+ // the test is missing some essential data to proceed further
+ FAIL_MISSING_TEST_DATA=-2,
+
+ // the tested function raised an error via cxcore error handler
+ FAIL_ERROR_IN_CALLED_FUNC=-3,
+
+ // an exception has been raised;
+ // for memory and arithmetic exception
+ // there are two specialized codes (see below...)
+ FAIL_EXCEPTION=-4,
+
+ // a memory exception
+ // (access violation, access to missed page, stack overflow etc.)
+ FAIL_MEMORY_EXCEPTION=-5,
+
+ // arithmetic exception (overflow, division by zero etc.)
+ FAIL_ARITHM_EXCEPTION=-6,
+
+ // the tested function corrupted memory (no exception have been raised)
+ FAIL_MEMORY_CORRUPTION_BEGIN=-7,
+ FAIL_MEMORY_CORRUPTION_END=-8,
+
+ // the tested function (or test itself) does not deallocate some memory
+ FAIL_MEMORY_LEAK=-9,
+
+ // the tested function returned invalid object, e.g. matrix, containing NaNs,
+ // structure with NULL or out-of-range fields (while it should not)
+ FAIL_INVALID_OUTPUT=-10,
+
+ // the tested function returned valid object, but it does not match to
+ // the original (or produced by the test) object
+ FAIL_MISMATCH=-11,
+
+ // the tested function returned valid object (a single number or numerical array),
+ // but it differs too much from the original (or produced by the test) object
+ FAIL_BAD_ACCURACY=-12,
+
+ // the tested function hung. Sometimes, can be determined by unexpectedly long
+ // processing time (in this case there should be a possibility to interrupt such a function)
+ FAIL_HANG=-13,
+
+ // unexpected response on passing bad arguments to the tested function
+ // (the function crashed, proceed successfully (while it should not), or returned
+ // error code that is different from what is expected)
+ FAIL_BAD_ARG_CHECK=-14,
+
+ // the test data (in whole or for the particular test case) is invalid
+ FAIL_INVALID_TEST_DATA=-15,
+
+ // the test has been skipped because it is not in the selected subset of the tests to run,
+ // because it has been run already within the same run with the same parameters, or because
+ // of some other reason and this is not considered as an error.
+ // Normally TS::run() (or overridden method in the derived class) takes care of what
+ // needs to be run, so this code should not occur.
+ SKIPPED=1
+ };
+
+ // get file storage
+ CvFileStorage* get_file_storage();
+
+ // get RNG to generate random input data for a test
+ RNG& get_rng() { return rng; }
+
+ // returns the current error code
+ int get_err_code() { return current_test_info.code; }
+
+ // returns the test extensivity scale
+ double get_test_case_count_scale() { return params.test_case_count_scale; }
+
+ const string& get_data_path() const { return data_path; }
+
+ // returns textual description of failure code
+ static string str_from_code( int code );
+
+protected:
+
+ // these are allocated within a test to try keep them valid in case of stack corruption
+ RNG rng;
+
+ // information about the current test
+ TestInfo current_test_info;
+
+ // the path to data files used by tests
+ string data_path;
+
+ TSParams params;
+ std::string output_buf[MAX_IDX];
+};
+
+
+/*****************************************************************************************\
+* Subclass of BaseTest for testing functions that process dense arrays *
+\*****************************************************************************************/
+
+class CV_EXPORTS ArrayTest : public BaseTest
+{
+public:
+ // constructor(s) and destructor
+ ArrayTest();
+ virtual ~ArrayTest();
+
+ virtual void clear();
+
+protected:
+
+ virtual int read_params( CvFileStorage* fs );
+ virtual int prepare_test_case( int test_case_idx );
+ virtual int validate_test_results( int test_case_idx );
+
+ virtual void prepare_to_validation( int test_case_idx );
+ virtual void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
+ virtual void fill_array( int test_case_idx, int i, int j, Mat& arr );
+ virtual void get_minmax_bounds( int i, int j, int type, Scalar& low, Scalar& high );
+ virtual double get_success_error_level( int test_case_idx, int i, int j );
+
+ bool cvmat_allowed;
+ bool iplimage_allowed;
+ bool optional_mask;
+ bool element_wise_relative_error;
+
+ int min_log_array_size;
+ int max_log_array_size;
+
+ enum { INPUT, INPUT_OUTPUT, OUTPUT, REF_INPUT_OUTPUT, REF_OUTPUT, TEMP, MASK, MAX_ARR };
+
+ vector<vector<void*> > test_array;
+ vector<vector<Mat> > test_mat;
+ float buf[4];
+};
+
+
+class CV_EXPORTS BadArgTest : public BaseTest
+{
+public:
+ // constructor(s) and destructor
+ BadArgTest();
+ virtual ~BadArgTest();
+
+protected:
+ virtual int run_test_case( int expected_code, const string& descr );
+ virtual void run_func(void) = 0;
+ int test_case_idx;
+ int progress;
+ double t, freq;
+
+ template<class F>
+ int run_test_case( int expected_code, const string& _descr, F f)
+ {
+ double new_t = (double)cv::getTickCount(), dt;
+ if( test_case_idx < 0 )
+ {
+ test_case_idx = 0;
+ progress = 0;
+ dt = 0;
+ }
+ else
+ {
+ dt = (new_t - t)/(freq*1000);
+ t = new_t;
+ }
+ progress = update_progress(progress, test_case_idx, 0, dt);
+
+ int errcount = 0;
+ bool thrown = false;
+ const char* descr = _descr.c_str() ? _descr.c_str() : "";
+
+ try
+ {
+ f();
+ }
+ catch(const cv::Exception& e)
+ {
+ thrown = true;
+ if( e.code != expected_code )
+ {
+ ts->printf(TS::LOG, "%s (test case #%d): the error code %d is different from the expected %d\n",
+ descr, test_case_idx, e.code, expected_code);
+ errcount = 1;
+ }
+ }
+ catch(...)
+ {
+ thrown = true;
+ ts->printf(TS::LOG, "%s (test case #%d): unknown exception was thrown (the function has likely crashed)\n",
+ descr, test_case_idx);
+ errcount = 1;
+ }
+ if(!thrown)
+ {
+ ts->printf(TS::LOG, "%s (test case #%d): no expected exception was thrown\n",
+ descr, test_case_idx);
+ errcount = 1;
+ }
+ test_case_idx++;
+
+ return errcount;
+ }
+};
+
+struct CV_EXPORTS DefaultRngAuto
+{
+ const uint64 old_state;
+
+ DefaultRngAuto() : old_state(cv::theRNG().state) { cv::theRNG().state = (uint64)-1; }
+ ~DefaultRngAuto() { cv::theRNG().state = old_state; }
+
+ DefaultRngAuto& operator=(const DefaultRngAuto&);
+};
+
+}
+
+namespace cvtest
+{
+
+// test images generation functions
+CV_EXPORTS void fillGradient(Mat& img, int delta = 5);
+CV_EXPORTS void smoothBorder(Mat& img, const Scalar& color, int delta = 3);
+
+CV_EXPORTS void printVersionInfo(bool useStdOut = true);
+} //namespace cvtest
+
+// fills c with zeros
+CV_EXPORTS void cvTsZero( CvMat* c, const CvMat* mask=0 );
+
+// copies a to b (whole matrix or only the selected region)
+CV_EXPORTS void cvTsCopy( const CvMat* a, CvMat* b, const CvMat* mask=0 );
+
+// converts one array to another
+CV_EXPORTS void cvTsConvert( const CvMat* src, CvMat* dst );
+
+CV_EXPORTS void cvTsGEMM( const CvMat* a, const CvMat* b, double alpha,
+ const CvMat* c, double beta, CvMat* d, int flags );
+
+#ifndef __CV_TEST_EXEC_ARGS
+#if defined(_MSC_VER) && (_MSC_VER <= 1400)
+#define __CV_TEST_EXEC_ARGS(...) \
+ while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
+#else
+#define __CV_TEST_EXEC_ARGS(...) \
+ __VA_ARGS__;
+#endif
+#endif
+
+#define CV_TEST_MAIN(resourcesubdir, ...) \
+int main(int argc, char **argv) \
+{ \
+ cvtest::TS::ptr()->init(resourcesubdir); \
+ ::testing::InitGoogleTest(&argc, argv); \
+ cvtest::printVersionInfo(); \
+ __CV_TEST_EXEC_ARGS(__VA_ARGS__) \
+ return RUN_ALL_TESTS(); \
+}
+
+// This usually only makes sense in perf tests with several implementations,
+// some of which are not available.
+#define CV_TEST_FAIL_NO_IMPL() do { \
+ ::testing::Test::RecordProperty("custom_status", "noimpl"); \
+ FAIL() << "No equivalent implementation."; \
+} while (0)
+
+#endif
+
+#include "ts_perf.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_gtest.h b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_gtest.h
new file mode 100644
index 00000000..75453d20
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_gtest.h
@@ -0,0 +1,20125 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test. It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <ostream>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan)
+//
+// Low-level types and utilities for porting Google Test to various
+// platforms. They are subject to change without notice. DO NOT USE
+// THEM IN USER CODE.
+//
+// This file is fundamental to Google Test. All other Google Test source
+// files are expected to #include this. Therefore, it cannot #include
+// any other Google Test header.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+// The user can define the following macros in the build script to
+// control Google Test's behavior. If the user doesn't define a macro
+// in this list, Google Test will define it.
+//
+// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2)
+// is/isn't available.
+// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions
+// are enabled.
+// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string
+// is/isn't available (some systems define
+// ::string, which is different to std::string).
+// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::wstring
+// is/isn't available (some systems define
+// ::wstring, which is different to std::wstring).
+// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular
+// expressions are/aren't available.
+// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that <pthread.h>
+// is/isn't available.
+// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't
+// enabled.
+// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that
+// std::wstring does/doesn't work (Google Test can
+// be used where std::wstring is unavailable).
+// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple
+// is/isn't available.
+// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the
+// compiler supports Microsoft's "Structured
+// Exception Handling".
+// GTEST_HAS_STREAM_REDIRECTION
+// - Define it to 1/0 to indicate whether the
+// platform supports I/O stream redirection using
+// dup() and dup2().
+// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google
+// Test's own tr1 tuple implementation should be
+// used. Unused when the user sets
+// GTEST_HAS_TR1_TUPLE to 0.
+// GTEST_LANG_CXX11 - Define it to 1/0 to indicate that Google Test
+// is building in C++11/C++98 mode.
+// GTEST_LINKED_AS_SHARED_LIBRARY
+// - Define to 1 when compiling tests that use
+// Google Test as a shared library (known as
+// DLL on Windows).
+// GTEST_CREATE_SHARED_LIBRARY
+// - Define to 1 when compiling Google Test itself
+// as a shared library.
+
+// This header defines the following utilities:
+//
+// Macros indicating the current platform (defined to 1 if compiled on
+// the given platform; otherwise undefined):
+// GTEST_OS_AIX - IBM AIX
+// GTEST_OS_CYGWIN - Cygwin
+// GTEST_OS_HPUX - HP-UX
+// GTEST_OS_LINUX - Linux
+// GTEST_OS_LINUX_ANDROID - Google Android
+// GTEST_OS_MAC - Mac OS X
+// GTEST_OS_IOS - iOS
+// GTEST_OS_IOS_SIMULATOR - iOS simulator
+// GTEST_OS_NACL - Google Native Client (NaCl)
+// GTEST_OS_OPENBSD - OpenBSD
+// GTEST_OS_QNX - QNX
+// GTEST_OS_SOLARIS - Sun Solaris
+// GTEST_OS_SYMBIAN - Symbian
+// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile)
+// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop
+// GTEST_OS_WINDOWS_MINGW - MinGW
+// GTEST_OS_WINDOWS_MOBILE - Windows Mobile
+// GTEST_OS_ZOS - z/OS
+//
+// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
+// most stable support. Since core members of the Google Test project
+// don't have access to other platforms, support for them may be less
+// stable. If you notice any problems on your platform, please notify
+// googletestframework@googlegroups.com (patches for fixing them are
+// even more welcome!).
+//
+// Note that it is possible that none of the GTEST_OS_* macros are defined.
+//
+// Macros indicating available Google Test features (defined to 1 if
+// the corresponding feature is supported; otherwise undefined):
+// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized
+// tests)
+// GTEST_HAS_DEATH_TEST - death tests
+// GTEST_HAS_PARAM_TEST - value-parameterized tests
+// GTEST_HAS_TYPED_TEST - typed tests
+// GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with
+// GTEST_HAS_POSIX_RE (see above) which users can
+// define themselves.
+// GTEST_USES_SIMPLE_RE - our own simple regex is used;
+// the above two are mutually exclusive.
+// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
+//
+// Macros for basic C++ coding:
+// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
+// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a
+// variable don't have to be used.
+// GTEST_DISALLOW_ASSIGN_ - disables operator=.
+// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
+// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used.
+//
+// Synchronization:
+// Mutex, MutexLock, ThreadLocal, GetThreadCount()
+// - synchronization primitives.
+// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above
+// synchronization primitives have real implementations
+// and Google Test is thread-safe; or 0 otherwise.
+//
+// Template meta programming:
+// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only.
+// IteratorTraits - partial implementation of std::iterator_traits, which
+// is not available in libCstd when compiled with Sun C++.
+//
+// Smart pointers:
+// scoped_ptr - as in TR2.
+//
+// Regular expressions:
+// RE - a simple regular expression class using the POSIX
+// Extended Regular Expression syntax on UNIX-like
+// platforms, or a reduced regular exception syntax on
+// other platforms, including Windows.
+//
+// Logging:
+// GTEST_LOG_() - logs messages at the specified severity level.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+//
+// Stdout and stderr capturing:
+// CaptureStdout() - starts capturing stdout.
+// GetCapturedStdout() - stops capturing stdout and returns the captured
+// string.
+// CaptureStderr() - starts capturing stderr.
+// GetCapturedStderr() - stops capturing stderr and returns the captured
+// string.
+//
+// Integer types:
+// TypeWithSize - maps an integer to an int type.
+// Int32, UInt32, Int64, UInt64, TimeInMillis
+// - integers of known sizes.
+// BiggestInt - the biggest signed integer type.
+//
+// Command-line utilities:
+// GTEST_FLAG() - references a flag.
+// GTEST_DECLARE_*() - declares a flag.
+// GTEST_DEFINE_*() - defines a flag.
+// GetInjectableArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+// GetEnv() - gets the value of an environment variable.
+// BoolFromGTestEnv() - parses a bool environment variable.
+// Int32FromGTestEnv() - parses an Int32 environment variable.
+// StringFromGTestEnv() - parses a string environment variable.
+
+#include <ctype.h> // for isspace, etc
+#include <stddef.h> // for ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif // !_WIN32_WCE
+
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
+#include <iostream> // NOLINT
+#include <sstream> // NOLINT
+#include <string> // NOLINT
+
+#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+#define GTEST_FLAG_PREFIX_ "gtest_"
+#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+#define GTEST_NAME_ "Google Test"
+#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+ (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif // __GNUC__
+
+// Determines the platform on which Google Test is compiled.
+#define GTEST_OS_CYGWIN 0
+#define GTEST_OS_SYMBIAN 0
+#define GTEST_OS_WINDOWS 0
+#define GTEST_OS_WINDOWS_MOBILE 0
+#define GTEST_OS_WINDOWS_MINGW 0
+#define GTEST_OS_WINDOWS_DESKTOP 0
+#define GTEST_OS_MAC 0
+#define GTEST_OS_LINUX 0
+#define GTEST_OS_LINUX_ANDROID 0
+#define GTEST_OS_ZOS 0
+#define GTEST_OS_SOLARIS 0
+#define GTEST_OS_AIX 0
+#define GTEST_OS_HPUX 0
+#define GTEST_OS_NACL 0
+#define GTEST_OS_OPENBSD 0
+#define GTEST_OS_QNX 0
+#define GTEST_OS_IOS 0
+#define GTEST_OS_IOS_SIMULATOR 0
+
+#ifdef __CYGWIN__
+# undef GTEST_OS_CYGWIN
+# define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+# undef GTEST_OS_SYMBIAN
+# define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+# undef GTEST_OS_WINDOWS
+# define GTEST_OS_WINDOWS 1
+# ifdef _WIN32_WCE
+# undef GTEST_OS_WINDOWS_MOBILE
+# define GTEST_OS_WINDOWS_MOBILE 1
+# elif defined(__MINGW__) || defined(__MINGW32__)
+# undef GTEST_OS_WINDOWS_MINGW
+# define GTEST_OS_WINDOWS_MINGW 1
+# else
+# undef GTEST_OS_WINDOWS_DESKTOP
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# endif // _WIN32_WCE
+#elif defined __APPLE__
+# undef GTEST_OS_MAC
+# define GTEST_OS_MAC 1
+# if TARGET_OS_IPHONE
+# undef GTEST_OS_IOS
+# define GTEST_OS_IOS 1
+# if TARGET_IPHONE_SIMULATOR
+# undef GTEST_OS_IOS_SIMULATOR
+# define GTEST_OS_IOS_SIMULATOR 1
+# endif
+# endif
+#elif defined __linux__
+# undef GTEST_OS_LINUX
+# define GTEST_OS_LINUX 1
+# if defined __ANDROID__
+# undef GTEST_OS_LINUX_ANDROID
+# define GTEST_OS_LINUX_ANDROID 1
+# endif
+#elif defined __MVS__
+# undef GTEST_OS_ZOS
+# define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+# undef GTEST_OS_SOLARIS
+# define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+# undef GTEST_OS_AIX
+# define GTEST_OS_AIX 1
+#elif defined(__hpux)
+# undef GTEST_OS_HPUX
+# define GTEST_OS_HPUX 1
+#elif defined __native_client__
+# undef GTEST_OS_NACL
+# define GTEST_OS_NACL 1
+#elif defined __OpenBSD__
+# undef GTEST_OS_OPENBSD
+# define GTEST_OS_OPENBSD 1
+#elif defined __QNX__
+# undef GTEST_OS_QNX
+# define GTEST_OS_QNX 1
+#endif // __CYGWIN__
+
+#ifndef GTEST_LANG_CXX11
+// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when
+// -std={c,gnu}++{0x,11} is passed. The C++11 standard specifies a
+// value for __cplusplus, and recent versions of clang, gcc, and
+// probably other compilers set that too in C++11 mode.
+# if defined __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
+// Compiling in at least C++11 mode.
+# define GTEST_LANG_CXX11 1
+# else
+# define GTEST_LANG_CXX11 0
+# endif
+#endif
+
+// Brings in definitions for functions used in the testing::internal::posix
+// namespace (read, write, close, chdir, isatty, stat). We do not currently
+// use them on Windows Mobile.
+#if !GTEST_OS_WINDOWS
+// This assumes that non-Windows OSes provide unistd.h. For OSes where this
+// is not the case, we need to include headers that provide the functions
+// mentioned above.
+# include <unistd.h>
+# include <strings.h>
+#elif !GTEST_OS_WINDOWS_MOBILE
+# include <direct.h>
+# include <io.h>
+#endif
+
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+# include <android/api-level.h> // NOLINT
+#endif
+
+// Defines this to true iff Google Test can use POSIX regular expressions.
+#ifndef GTEST_HAS_POSIX_RE
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Froyo.
+# define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 8)
+# else
+# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+#endif
+#endif
+
+#if GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise. We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h> // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+# define GTEST_USES_SIMPLE_RE 0
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows. Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+# define GTEST_USES_POSIX_RE 0
+
+#else
+
+// <regex.h> may not be available on this platform. Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+# define GTEST_USES_POSIX_RE 0
+
+#endif // GTEST_HAS_POSIX_RE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+# ifndef _HAS_EXCEPTIONS
+# define _HAS_EXCEPTIONS 1
+# endif // _HAS_EXCEPTIONS
+# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions. However, there is no compile-time way of
+// detecting whether they are enabled or not. Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__HP_aCC)
+// Exception handling is in effect by default in HP aCC compiler. It has to
+// be turned off by the +noeh compiler option if desired.
+# define GTEST_HAS_EXCEPTIONS 1
+# else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+# define GTEST_HAS_EXCEPTIONS 0
+# endif // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif // GTEST_HAS_EXCEPTIONS
+
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+# define GTEST_HAS_STD_STRING 1
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+# error "Google Test cannot be used where ::std::string isn't available."
+#endif // !defined(GTEST_HAS_STD_STRING)
+
+#ifndef GTEST_HAS_GLOBAL_STRING
+// The user didn't tell us whether ::string is available, so we need
+// to figure it out.
+
+# define GTEST_HAS_GLOBAL_STRING 0
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring
+// is available.
+
+// Cygwin 1.7 and below doesn't support ::std::wstring.
+// Solaris' libc++ doesn't support it either. Android has
+// no support for it at least as recent as Froyo (2.2).
+# define GTEST_HAS_STD_WSTRING \
+ (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))
+
+#endif // GTEST_HAS_STD_WSTRING
+
+#ifndef GTEST_HAS_GLOBAL_WSTRING
+// The user didn't tell us whether ::wstring is available, so we need
+// to figure it out.
+# define GTEST_HAS_GLOBAL_WSTRING \
+ (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// Determines whether RTTI is available.
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+# ifdef _MSC_VER
+
+# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
+# ifdef __GXX_RTTI
+// When building against STLport with the Android NDK and with
+// -frtti -fno-exceptions, the build fails at link time with undefined
+// references to __cxa_bad_typeid. Not sure if STL or toolchain bug,
+// so disable RTTI when detected.
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
+ !defined(__EXCEPTIONS)
+# define GTEST_HAS_RTTI 0
+# else
+# define GTEST_HAS_RTTI 1
+# endif // GTEST_OS_LINUX_ANDROID && _STLPORT_MAJOR && !__EXCEPTIONS
+# else
+# define GTEST_HAS_RTTI 0
+# endif // __GXX_RTTI
+
+// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends
+// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the
+// first version with C++ support.
+# elif defined(__clang__)
+
+# define GTEST_HAS_RTTI __has_feature(cxx_rtti)
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+# ifdef __RTTI_ALL__
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+# else
+
+// For all other compilers, we assume RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+
+# endif // _MSC_VER
+
+#endif // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+# include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we assume pthreads support is
+// available on Linux and Mac.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \
+ || GTEST_OS_QNX)
+#endif // GTEST_HAS_PTHREAD
+
+#if GTEST_HAS_PTHREAD
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+# include <pthread.h> // NOLINT
+
+// For timespec and nanosleep, used below.
+# include <time.h> // NOLINT
+#endif
+
+// Determines whether Google Test can use tr1/tuple. You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple with be disabled in this mode).
+#ifndef GTEST_HAS_TR1_TUPLE
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)
+// STLport, provided with the Android NDK, has neither <tr1/tuple> or <tuple>.
+# define GTEST_HAS_TR1_TUPLE 0
+# else
+// The user didn't tell us not to do it, so we assume it's OK.
+# define GTEST_HAS_TR1_TUPLE 1
+# endif
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
+// The user didn't tell us, so we need to figure it out.
+
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already. At this time, libstdc++ 4.0.0+ and
+// MSVC 2010 are the only mainstream standard libraries that come
+// with a TR1 tuple implementation. NVIDIA's CUDA NVCC compiler
+// pretends to be GCC by defining __GNUC__ and friends, but cannot
+// compile GCC's tuple implementation. MSVC 2008 (9.0) provides TR1
+// tuple in a 323 MB Feature Pack download, which we cannot assume the
+// user has. QNX's QCC compiler is a modified GCC but it doesn't
+// support TR1 tuple. libc++ only provides std::tuple, in C++11 mode,
+// and it can be used with some compilers that define __GNUC__.
+# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \
+ && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) && !defined(_STLPORT_MAJOR) \
+ || (defined(_MSC_VER) && _MSC_VER >= 1600)
+# define GTEST_ENV_HAS_TR1_TUPLE_ 1
+# else
+# define GTEST_ENV_HAS_TR1_TUPLE_ 0
+# endif
+
+// C++11 specifies that <tuple> provides std::tuple. Use that if gtest is used
+// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6
+// can build with clang but need to use gcc4.2's libstdc++).
+# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325)
+# define GTEST_ENV_HAS_STD_TUPLE_ 1
+# else
+# define GTEST_ENV_HAS_STD_TUPLE_ 0
+# endif
+
+# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_
+# define GTEST_USE_OWN_TR1_TUPLE 0
+# else
+# define GTEST_USE_OWN_TR1_TUPLE 1
+# undef GTEST_HAS_TR1_TUPLE
+# define GTEST_HAS_TR1_TUPLE 1
+# endif
+
+#endif // GTEST_USE_OWN_TR1_TUPLE
+
+// To avoid conditional compilation everywhere, we make it
+// gtest-port.h's responsibility to #include the header implementing
+// tr1/tuple.
+#if GTEST_HAS_TR1_TUPLE
+
+# if GTEST_USE_OWN_TR1_TUPLE
+// This file was GENERATED by command:
+// pump.py gtest-tuple.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility> // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined). This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+ template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+ private:
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
+ void, void, void>
+#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
+ void, void, void>
+#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
+ void, void, void>
+#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
+ void, void, void>
+#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ void, void, void>
+#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, void, void>
+#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, void>
+#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, T##9>
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+#define GTEST_0_TYPENAMES_(T)
+#define GTEST_1_TYPENAMES_(T) typename T##0
+#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
+#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
+#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3
+#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4
+#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5
+#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6
+#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
+#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8
+#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8, typename T##9
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior. We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+ typename T3 = void, typename T4 = void, typename T5 = void,
+ typename T6 = void, typename T7 = void, typename T8 = void,
+ typename T9 = void>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; }; // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&. This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>. kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 0, GTEST_10_TUPLE_(T)> {
+ typedef T0 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 1, GTEST_10_TUPLE_(T)> {
+ typedef T1 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 2, GTEST_10_TUPLE_(T)> {
+ typedef T2 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 3, GTEST_10_TUPLE_(T)> {
+ typedef T3 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 4, GTEST_10_TUPLE_(T)> {
+ typedef T4 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 5, GTEST_10_TUPLE_(T)> {
+ typedef T5 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 6, GTEST_10_TUPLE_(T)> {
+ typedef T6 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 7, GTEST_10_TUPLE_(T)> {
+ typedef T7 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 8, GTEST_10_TUPLE_(T)> {
+ typedef T8 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 9, GTEST_10_TUPLE_(T)> {
+ typedef T9 type;
+};
+
+} // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+ tuple() {}
+ tuple(const tuple& /* t */) {}
+ tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+class GTEST_1_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
+
+ tuple(const tuple& t) : f0_(t.f0_) {}
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ return *this;
+ }
+
+ T0 f0_;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+class GTEST_2_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
+ f1_(f1) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
+ template <typename U0, typename U1>
+ tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+ template <typename U0, typename U1>
+ tuple& operator=(const ::std::pair<U0, U1>& p) {
+ f0_ = p.first;
+ f1_ = p.second;
+ return *this;
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+class GTEST_3_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+class GTEST_4_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+class GTEST_5_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
+ GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_) {}
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+class GTEST_6_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_) {}
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+class GTEST_7_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+class GTEST_8_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
+ GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+class GTEST_9_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+class tuple {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
+ f9_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
+ f9_(t.f9_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ f9_ = t.f9_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+ T9 f9_;
+};
+
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing an
+// std::tr1::reference_wrapper<T> to make_tuple(). And we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+template <GTEST_1_TYPENAMES_(T)>
+inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
+ return GTEST_1_TUPLE_(T)(f0);
+}
+
+template <GTEST_2_TYPENAMES_(T)>
+inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
+ return GTEST_2_TUPLE_(T)(f0, f1);
+}
+
+template <GTEST_3_TYPENAMES_(T)>
+inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
+ return GTEST_3_TUPLE_(T)(f0, f1, f2);
+}
+
+template <GTEST_4_TYPENAMES_(T)>
+inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3) {
+ return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
+}
+
+template <GTEST_5_TYPENAMES_(T)>
+inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4) {
+ return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
+}
+
+template <GTEST_6_TYPENAMES_(T)>
+inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5) {
+ return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
+}
+
+template <GTEST_7_TYPENAMES_(T)>
+inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
+ return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
+}
+
+template <GTEST_8_TYPENAMES_(T)>
+inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
+ return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
+}
+
+template <GTEST_9_TYPENAMES_(T)>
+inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8) {
+ return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
+}
+
+template <GTEST_10_TYPENAMES_(T)>
+inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8, const T9& f9) {
+ return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+}
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+template <GTEST_0_TYPENAMES_(T)>
+struct tuple_size<GTEST_0_TUPLE_(T)> {
+ static const int value = 0;
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+struct tuple_size<GTEST_1_TUPLE_(T)> {
+ static const int value = 1;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+struct tuple_size<GTEST_2_TUPLE_(T)> {
+ static const int value = 2;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+struct tuple_size<GTEST_3_TUPLE_(T)> {
+ static const int value = 3;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+struct tuple_size<GTEST_4_TUPLE_(T)> {
+ static const int value = 4;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+struct tuple_size<GTEST_5_TUPLE_(T)> {
+ static const int value = 5;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+struct tuple_size<GTEST_6_TUPLE_(T)> {
+ static const int value = 6;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+struct tuple_size<GTEST_7_TUPLE_(T)> {
+ static const int value = 7;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+struct tuple_size<GTEST_8_TUPLE_(T)> {
+ static const int value = 8;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+struct tuple_size<GTEST_9_TUPLE_(T)> {
+ static const int value = 9;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct tuple_size<GTEST_10_TUPLE_(T)> {
+ static const int value = 10;
+};
+
+template <int k, class Tuple>
+struct tuple_element {
+ typedef typename gtest_internal::TupleElement<
+ k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+template <>
+class Get<0> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ Field(Tuple& t) { return t.f0_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ ConstField(const Tuple& t) { return t.f0_; }
+};
+
+template <>
+class Get<1> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ Field(Tuple& t) { return t.f1_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ ConstField(const Tuple& t) { return t.f1_; }
+};
+
+template <>
+class Get<2> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ Field(Tuple& t) { return t.f2_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ ConstField(const Tuple& t) { return t.f2_; }
+};
+
+template <>
+class Get<3> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ Field(Tuple& t) { return t.f3_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ ConstField(const Tuple& t) { return t.f3_; }
+};
+
+template <>
+class Get<4> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ Field(Tuple& t) { return t.f4_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ ConstField(const Tuple& t) { return t.f4_; }
+};
+
+template <>
+class Get<5> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ Field(Tuple& t) { return t.f5_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ ConstField(const Tuple& t) { return t.f5_; }
+};
+
+template <>
+class Get<6> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ Field(Tuple& t) { return t.f6_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ ConstField(const Tuple& t) { return t.f6_; }
+};
+
+template <>
+class Get<7> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ Field(Tuple& t) { return t.f7_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ ConstField(const Tuple& t) { return t.f7_; }
+};
+
+template <>
+class Get<8> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ Field(Tuple& t) { return t.f8_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ ConstField(const Tuple& t) { return t.f8_; }
+};
+
+template <>
+class Get<9> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ Field(Tuple& t) { return t.f9_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ ConstField(const Tuple& t) { return t.f9_; }
+};
+
+} // namespace gtest_internal
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(const GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::ConstField(t);
+}
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equals the first k fields of t2.
+// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+ return true;
+ }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+ return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+ ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+ }
+};
+
+} // namespace gtest_internal
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator==(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) {
+ return gtest_internal::SameSizeTuplePrefixComparator<
+ tuple_size<GTEST_10_TUPLE_(T)>::value,
+ tuple_size<GTEST_10_TUPLE_(U)>::value>::Eq(t, u);
+}
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
+
+// 6.1.4 Pairs.
+// Unimplemented.
+
+} // namespace tr1
+} // namespace std
+
+#undef GTEST_0_TUPLE_
+#undef GTEST_1_TUPLE_
+#undef GTEST_2_TUPLE_
+#undef GTEST_3_TUPLE_
+#undef GTEST_4_TUPLE_
+#undef GTEST_5_TUPLE_
+#undef GTEST_6_TUPLE_
+#undef GTEST_7_TUPLE_
+#undef GTEST_8_TUPLE_
+#undef GTEST_9_TUPLE_
+#undef GTEST_10_TUPLE_
+
+#undef GTEST_0_TYPENAMES_
+#undef GTEST_1_TYPENAMES_
+#undef GTEST_2_TYPENAMES_
+#undef GTEST_3_TYPENAMES_
+#undef GTEST_4_TYPENAMES_
+#undef GTEST_5_TYPENAMES_
+#undef GTEST_6_TYPENAMES_
+#undef GTEST_7_TYPENAMES_
+#undef GTEST_8_TYPENAMES_
+#undef GTEST_9_TYPENAMES_
+#undef GTEST_10_TYPENAMES_
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+# elif GTEST_ENV_HAS_STD_TUPLE_
+# include <tuple>
+// C++11 puts its tuple into the ::std namespace rather than
+// ::std::tr1. gtest expects tuple to live in ::std::tr1, so put it there.
+// This causes undefined behavior, but supported compilers react in
+// the way we intend.
+namespace std {
+namespace tr1 {
+using ::std::get;
+using ::std::make_tuple;
+using ::std::tuple;
+using ::std::tuple_element;
+using ::std::tuple_size;
+}
+}
+
+# elif GTEST_OS_SYMBIAN
+
+// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
+// use STLport's tuple implementation, which unfortunately doesn't
+// work as the copy of STLport distributed with Symbian is incomplete.
+// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
+// use its own tuple implementation.
+# ifdef BOOST_HAS_TR1_TUPLE
+# undef BOOST_HAS_TR1_TUPLE
+# endif // BOOST_HAS_TR1_TUPLE
+
+// This prevents <boost/tr1/detail/config.hpp>, which defines
+// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
+# define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+# include <tuple>
+
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header. This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+# if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled. _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>. Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+# define _TR1_FUNCTIONAL 1
+# include <tr1/tuple>
+# undef _TR1_FUNCTIONAL // Allows the user to #include
+ // <tr1/functional> if he chooses to.
+# else
+# include <tr1/tuple> // NOLINT
+# endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+# else
+// If the compiler is not GCC 4.0+, we assume the user is using a
+// spec-conforming TR1 implementation.
+# include <tuple> // NOLINT
+# endif // GTEST_USE_OWN_TR1_TUPLE
+
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+# if GTEST_OS_LINUX_ANDROID
+// On Android, clone() is only available on ARM starting with Gingerbread.
+# if (defined(__arm__) || defined(__mips__)) && __ANDROID_API__ >= 9
+# define GTEST_HAS_CLONE 1
+# else
+# define GTEST_HAS_CLONE 0
+# endif
+# else
+# define GTEST_HAS_CLONE 1
+# endif
+# else
+# define GTEST_HAS_CLONE 0
+# endif // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN
+# define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+# define GTEST_HAS_STREAM_REDIRECTION 1
+# endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier as
+// abort() in a VC 7.1 application compiled as GUI in debug config
+// pops up a dialog window that cannot be suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+ (GTEST_OS_MAC && !GTEST_OS_IOS) || GTEST_OS_IOS_SIMULATOR || \
+ (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+ GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
+ GTEST_OS_OPENBSD || GTEST_OS_QNX)
+# define GTEST_HAS_DEATH_TEST 1
+# include <vector> // NOLINT
+#else
+# define GTEST_HAS_DEATH_TEST 0
+#endif
+
+// We don't support MSVC 7.1 with exceptions disabled now. Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+ defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled. The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+# define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+ (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#else
+# define GTEST_CAN_STREAM_RESULTS_ 0
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding. This leads to problems with code like:
+//
+// if (gate)
+// ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT
+#endif
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used. This is useful when all interesting logic happens inside the
+// c'tor and / or d'tor. Example:
+//
+// struct Foo {
+// Foo() { ... }
+// } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#else
+# define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+ void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
+ type(type const &);\
+ GTEST_DISALLOW_ASSIGN_(type)
+
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro should be used on function declarations
+// following the argument list:
+//
+// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
+# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+# define GTEST_MUST_USE_RESULT_
+#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling. This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+# define GTEST_HAS_SEH 1
+# else
+// Assume no SEH.
+# define GTEST_HAS_SEH 0
+# endif
+
+#endif // GTEST_HAS_SEH
+
+#ifdef _MSC_VER
+
+# if GTEST_LINKED_AS_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllimport)
+# elif GTEST_CREATE_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllexport)
+# endif
+
+#endif // _MSC_VER
+
+#ifndef GTEST_API_
+# define GTEST_API_
+#endif
+
+#ifdef __GNUC__
+// Ask the compiler to never inline a given function.
+# define GTEST_NO_INLINE_ __attribute__((noinline))
+#else
+# define GTEST_NO_INLINE_
+#endif
+
+// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
+#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)
+# define GTEST_HAS_CXXABI_H_ 1
+#else
+# define GTEST_HAS_CXXABI_H_ 0
+#endif
+
+namespace testing {
+
+class Message;
+
+namespace internal {
+
+// A secret type that Google Test users don't know about. It has no
+// definition on purpose. Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+// GTEST_COMPILE_ASSERT_(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
+// content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+// GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+// Instantiated only to carry a bool template argument; the body is
+// intentionally empty. See the implementation notes below.
+template <bool>
+struct CompileAssert {
+};
+
+// Declares an array type of size -1 (ill-formed) when expr is false; the
+// variable name 'msg' then appears in the compiler diagnostic.
+#define GTEST_COMPILE_ASSERT_(expr, msg) \
+ typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \
+ msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_
+
+// Implementation details of GTEST_COMPILE_ASSERT_:
+//
+// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1
+// elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+// #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+// does not work, as gcc supports variable-length arrays whose sizes
+// are determined at run-time (this is gcc's extension and not part
+// of the C++ standard). As a result, gcc fails to reject the
+// following code with the simple definition:
+//
+// int foo;
+// GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is
+// // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensures that
+// expr is a compile-time constant. (Template arguments must be
+// determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
+//
+// CompileAssert<bool(expr)>
+//
+// instead, these compilers will refuse to compile
+//
+// GTEST_COMPILE_ASSERT_(5 > 0, some_message);
+//
+// (They seem to think the ">" in "5 > 0" marks the end of the
+// template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+// ((expr) ? 1 : -1).
+//
+// This is to avoid running into a bug in MS VC 7.1, which
+// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.
+//
+// This template is declared, but intentionally undefined.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+// Defined only when both type arguments are identical; any other
+// instantiation refers to the undefined primary template and fails to
+// compile, which is how StaticAssertTypeEq reports a mismatch.
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {};
+
+#if GTEST_HAS_GLOBAL_STRING
+typedef ::string string;
+#else
+typedef ::std::string string;
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+typedef ::wstring wstring;
+#elif GTEST_HAS_STD_WSTRING
+typedef ::std::wstring wstring;
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// A helper for suppressing warnings on constant condition. It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines scoped_ptr.
+
+// This implementation of scoped_ptr is PARTIAL - it only contains
+// enough stuff to satisfy Google Test's need.
+template <typename T>
+class scoped_ptr {
+ public:
+ typedef T element_type;
+
+ // Takes ownership of p (may be NULL); the destructor deletes it.
+ explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
+ ~scoped_ptr() { reset(); }
+
+ T& operator*() const { return *ptr_; }
+ T* operator->() const { return ptr_; }
+ T* get() const { return ptr_; }
+
+ // Relinquishes ownership: returns the raw pointer without deleting it
+ // and leaves this scoped_ptr holding NULL.
+ T* release() {
+ T* const ptr = ptr_;
+ ptr_ = NULL;
+ return ptr;
+ }
+
+ // Deletes the currently owned object (if any) and takes ownership of p.
+ // Resetting to the same pointer is a no-op, which also makes
+ // double-delete via self-reset impossible.
+ void reset(T* p = NULL) {
+ if (p != ptr_) {
+ if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type.
+ delete ptr_;
+ }
+ ptr_ = p;
+ }
+ }
+
+ private:
+ T* ptr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
+};
+
+// Defines RE.
+
+// A simple C++ wrapper for <regex.h>. It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+ // A copy constructor is required by the Standard to initialize object
+ // references from r-values.
+ RE(const RE& other) { Init(other.pattern()); }
+
+ // Constructs an RE from a string.
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT
+
+#if GTEST_HAS_GLOBAL_STRING
+
+ RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ RE(const char* regex) { Init(regex); } // NOLINT
+ // Out-of-line destructor; NOTE(review): presumably releases the compiled
+ // regex state set up by Init() — definition not in this chunk.
+ ~RE();
+
+ // Returns the string representation of the regex.
+ const char* pattern() const { return pattern_; }
+
+ // FullMatch(str, re) returns true iff regular expression re matches
+ // the entire str.
+ // PartialMatch(str, re) returns true iff regular expression re
+ // matches a substring of str (including str itself).
+ //
+ // TODO(wan@google.com): make FullMatch() and PartialMatch() work
+ // when str contains NUL characters.
+ static bool FullMatch(const ::std::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::std::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+#if GTEST_HAS_GLOBAL_STRING
+
+ static bool FullMatch(const ::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ // The const char* overloads do the real work; the overloads above are
+ // thin forwarding wrappers.
+ static bool FullMatch(const char* str, const RE& re);
+ static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+ // Shared by all constructors; out-of-line (not in this chunk).
+ void Init(const char* regex);
+
+ // We use a const char* instead of an std::string, as Google Test used to be
+ // used where std::string is not available. TODO(wan@google.com): change to
+ // std::string.
+ const char* pattern_;
+ bool is_valid_;
+
+#if GTEST_USES_POSIX_RE
+
+ regex_t full_regex_; // For FullMatch().
+ regex_t partial_regex_; // For PartialMatch().
+
+#else // GTEST_USES_SIMPLE_RE
+
+ const char* full_pattern_; // For FullMatch();
+
+#endif
+
+ GTEST_DISALLOW_ASSIGN_(RE);
+};
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
+ int line);
+
+// Defines logging utilities:
+// GTEST_LOG_(severity) - logs messages at the specified severity level. The
+// message itself is streamed into the macro.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+
+// Log severity levels, in increasing order of severity. GTEST_FATAL also
+// aborts the program (see ~GTestLog below).
+enum GTestLogSeverity {
+ GTEST_INFO,
+ GTEST_WARNING,
+ GTEST_ERROR,
+ GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+ // Begins a log entry at the given severity and source location
+ // (defined out of line, not in this chunk).
+ GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+ // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+ ~GTestLog();
+
+ // All log messages are streamed to stderr.
+ ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+ const GTestLogSeverity severity_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+// Streams a log entry at the given severity, tagged with the call site's
+// file and line; the temporary GTestLog finishes the entry on destruction.
+#define GTEST_LOG_(severity) \
+ ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+ __FILE__, __LINE__).GetStream()
+
+// No-op: log messages already go to stderr (see GTestLog::GetStream()).
+inline void LogToStderr() {}
+// fflush(NULL) flushes all open output streams.
+inline void FlushInfoLog() { fflush(NULL); }
+
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+// Synopsis:
+// GTEST_CHECK_(boolean_condition);
+// or
+// GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+// This checks the condition and if the condition is not satisfied
+// it prints message about the condition violation, including the
+// condition itself, plus additional message streamed into it, if any,
+// and then it aborts the program. It aborts the program irrespective of
+// whether it is built in the debug mode or not.
+#define GTEST_CHECK_(condition) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::IsTrue(condition)) \
+ ; \
+ else \
+ GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success). Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+// NOTE(review): the streamed message lacks a space between the stringified
+// call and "failed" (e.g. "pthread_join(...)failed with error 22") —
+// cosmetic only, left as-is to match the vendored upstream source.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+ if (const int gtest_error = (posix_call)) \
+ GTEST_LOG_(FATAL) << #posix_call << "failed with error " \
+ << gtest_error
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Use ImplicitCast_ as a safe version of static_cast for upcasting in
+// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
+// const Foo*). When you use ImplicitCast_, the compiler checks that
+// the cast is safe. Such explicit ImplicitCast_s are necessary in
+// surprisingly many situations where C++ demands an exact type match
+// instead of an argument type convertible to a target type.
+//
+// The syntax for using ImplicitCast_ is the same as for static_cast:
+//
+// ImplicitCast_<ToType>(expr)
+//
+// ImplicitCast_ would have been part of the C++ standard library,
+// but the proposal was submitted too late. It will probably make
+// its way into the language in the future.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., implicit_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+// To must be spelled out explicitly at the call site; the implicit
+// conversion itself happens when the argument binds to the To parameter.
+template<typename To>
+inline To ImplicitCast_(To x) { return x; }
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
+// always succeed. When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo? It
+// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus,
+// when you downcast, you should use this macro. In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not). In normal mode, we do the efficient static_cast<>
+// instead. Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+// This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (eg code like this:
+// if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+// if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., down_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To, typename From> // use like this: DownCast_<T*>(foo);
+inline To DownCast_(From* f) { // so we only accept pointers
+ // Ensures that To is a sub-type of From *. This test is here only
+ // for compile-time type checking, and has no overhead in an
+ // optimized build at run-time, as it will be optimized away
+ // completely.
+ if (false) {
+ // A To must implicitly convert to From*; otherwise this line
+ // fails to compile, rejecting cross-hierarchy casts.
+ const To to = NULL;
+ ::testing::internal::ImplicitCast_<From*>(to);
+ }
+
+#if GTEST_HAS_RTTI
+ // RTTI: debug mode only!
+ GTEST_CHECK_(f == NULL || dynamic_cast<To>(f) != NULL);
+#endif
+ // Fast path used in all builds; NULL passes straight through.
+ return static_cast<To>(f);
+}
+
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+ // Requires an *exact* dynamic-type match, not merely a subclass.
+ // NOTE(review): typeid(*base) dereferences base, so callers must pass a
+ // non-NULL pointer — confirm at call sites.
+ GTEST_CHECK_(typeid(*base) == typeid(Derived));
+ return dynamic_cast<Derived*>(base); // NOLINT
+#else
+ return static_cast<Derived*>(base); // Poor man's downcast.
+#endif
+}
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Defines the stderr capturer:
+// CaptureStdout - starts capturing stdout.
+// GetCapturedStdout - stops capturing stdout and returns the captured string.
+// CaptureStderr - starts capturing stderr.
+// GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ std::string GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ std::string GetCapturedStderr();
+
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+
+#if GTEST_HAS_DEATH_TEST
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs();
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>*
+ new_argvs);
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+extern ::std::vector<testing::internal::string> g_argvs;
+
+#endif // GTEST_HAS_DEATH_TEST
+
+// Defines synchronization primitives.
+
+#if GTEST_HAS_PTHREAD
+
+// Sleeps for (roughly) n milli-seconds. This function is only for
+// testing Google Test's own constructs. Don't use it in user tests,
+// either directly or indirectly.
+inline void SleepMilliseconds(int n) {
+ // NOTE(review): tv_nsec must be < 1,000,000,000, so n >= 1000 yields an
+ // invalid timespec (nanosleep fails with EINVAL). Callers in this file
+ // only pass small values (e.g. 10), so this is left unchanged to match
+ // the vendored upstream source.
+ const timespec time = {
+ 0, // 0 seconds.
+ n * 1000L * 1000L, // And n ms.
+ };
+ // Return value ignored: no retry on EINTR; acceptable for test-only code.
+ nanosleep(&time, NULL);
+}
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+ Notification() : notified_(false) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+ }
+ ~Notification() {
+ // Destroy errors are ignored here (unlike init, which is checked).
+ pthread_mutex_destroy(&mutex_);
+ }
+
+ // Notifies all threads created with this notification to start. Must
+ // be called from the controller thread.
+ void Notify() {
+ pthread_mutex_lock(&mutex_);
+ notified_ = true;
+ pthread_mutex_unlock(&mutex_);
+ }
+
+ // Blocks until the controller thread notifies. Must be called from a test
+ // thread.
+ void WaitForNotification() {
+ // Polls the flag every 10 ms under the mutex instead of using a
+ // condition variable; simple and adequate for test-only code.
+ for (;;) {
+ pthread_mutex_lock(&mutex_);
+ const bool notified = notified_;
+ pthread_mutex_unlock(&mutex_);
+ if (notified)
+ break;
+ SleepMilliseconds(10);
+ }
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Guards notified_.
+ bool notified_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+ virtual ~ThreadWithParamBase() {}
+ // Implemented by ThreadWithParam<T>::Run() below.
+ virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with the C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical. Some compilers (for
+// example, SunStudio) treat them as different types. Since class methods
+// cannot be defined with C-linkage we need to define a free C-function to
+// pass into pthread_create().
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+ // 'thread' is the ThreadWithParamBase* passed to pthread_create() below.
+ static_cast<ThreadWithParamBase*>(thread)->Run();
+ return NULL;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+// void ThreadFunc(int param) { /* Do things with param */ }
+// Notification thread_can_start;
+// ...
+// // The thread_can_start parameter is optional; you can supply NULL.
+// ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+// thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void (*UserThreadFunc)(T);
+
+ // Starts a new thread running func(param). If thread_can_start is
+ // non-NULL, the new thread first waits for it to be notified.
+ ThreadWithParam(
+ UserThreadFunc func, T param, Notification* thread_can_start)
+ : func_(func),
+ param_(param),
+ thread_can_start_(thread_can_start),
+ finished_(false) {
+ ThreadWithParamBase* const base = this;
+ // The thread can be created only after all fields except thread_
+ // have been initialized.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
+ }
+ ~ThreadWithParam() { Join(); }
+
+ // Blocks until the thread function returns; safe to call more than
+ // once (subsequent calls are no-ops).
+ void Join() {
+ if (!finished_) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
+ finished_ = true;
+ }
+ }
+
+ // Runs on the new thread: waits for the optional start notification,
+ // then invokes the user-supplied function.
+ virtual void Run() {
+ if (thread_can_start_ != NULL)
+ thread_can_start_->WaitForNotification();
+ func_(param_);
+ }
+
+ private:
+ const UserThreadFunc func_; // User-supplied thread function.
+ const T param_; // User-supplied parameter to the thread function.
+ // When non-NULL, used to block execution until the controller thread
+ // notifies.
+ Notification* const thread_can_start_;
+ bool finished_; // true iff we know that the thread function has finished.
+ pthread_t thread_; // The native thread object.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms. They
+// are used in conjunction with class MutexLock:
+//
+// Mutex mutex;
+// ...
+// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end
+// // of the current scope.
+//
+// MutexBase implements behavior for both statically and dynamically
+// allocated mutexes. Do not use MutexBase directly. Instead, write
+// the following to define a static mutex:
+//
+// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+//
+// You can forward declare a static mutex like this:
+//
+// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// To create a dynamic mutex, just define an object of type Mutex.
+class MutexBase {
+ public:
+ // Acquires this mutex.
+ void Lock() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+ // owner_ is written only while the mutex is held, so recording it
+ // here (for AssertHeld) is race-free.
+ owner_ = pthread_self();
+ has_owner_ = true;
+ }
+
+ // Releases this mutex.
+ void Unlock() {
+ // Since the lock is being released the owner_ field should no longer be
+ // considered valid. We don't protect writing to has_owner_ here, as it's
+ // the caller's responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ has_owner_ = false;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+ }
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld() const {
+ GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
+ << "The current thread is not holding the mutex @" << this;
+ }
+
+ // A static mutex may be used before main() is entered. It may even
+ // be used before the dynamic initialization stage. Therefore we
+ // must be able to initialize a static mutex object at link time.
+ // This means MutexBase has to be a POD and its member variables
+ // have to be public.
+ public:
+ pthread_mutex_t mutex_; // The underlying pthread mutex.
+ // has_owner_ indicates whether the owner_ field below contains a valid thread
+ // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
+ // accesses to the owner_ field should be protected by a check of this field.
+ // An alternative might be to memset() owner_ to all zeros, but there's no
+ // guarantee that a zero'd pthread_t is necessarily invalid or even different
+ // from pthread_self().
+ bool has_owner_;
+ pthread_t owner_; // The thread holding the mutex.
+};
+
+// Forward-declares a static mutex.
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+// The initialization list here does not explicitly initialize each field,
+// instead relying on default initialization for the unspecified fields. In
+// particular, the owner_ field (a pthread_t) is not explicitly initialized.
+// This allows initialization to work whether pthread_t is a scalar or struct.
+// The flag -Wmissing-field-initializers must not be specified for this to work.
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false }
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+ Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+ // Dynamic mutexes get no link-time zero-initialization (unlike
+ // GTEST_DEFINE_STATIC_MUTEX_), so clear has_owner_ explicitly.
+ has_owner_ = false;
+ }
+ ~Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock as the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ // Acquires *mutex for the lifetime of this object.
+ explicit GTestMutexLock(MutexBase* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ // Releases the mutex when the lock goes out of scope.
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ MutexBase* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage. Therefore it cannot be templatized to access
+// ThreadLocal<T>. Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+ // Virtual so that deleting through a base pointer (as
+ // DeleteThreadLocalValue does) destroys the derived holder.
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+ delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
// Implements thread-local storage on pthreads-based systems.
//
//   // Thread 1
//   ThreadLocal<int> tl(100);  // 100 is the default value for each thread.
//
//   // Thread 2
//   tl.set(150);  // Changes the value for thread 2 only.
//   EXPECT_EQ(150, tl.get());
//
//   // Thread 1
//   EXPECT_EQ(100, tl.get());  // In thread 1, tl has the original value.
//   tl.set(200);
//   EXPECT_EQ(200, tl.get());
//
// The template type argument T must have a public copy constructor.
// In addition, the default ThreadLocal constructor requires T to have
// a public default constructor.
//
// An object managed for a thread by a ThreadLocal instance is deleted
// when the thread exits.  Or, if the ThreadLocal instance dies in
// that thread, when the ThreadLocal dies.  It's the user's
// responsibility to ensure that all other threads using a ThreadLocal
// have exited when it dies, or the per-thread objects for those
// threads will not be deleted.
//
// Google Test only uses global ThreadLocal objects.  That means they
// will die after main() has returned.  Therefore, no per-thread
// object managed by Google Test will be leaked as long as all threads
// using Google Test have exited when main() returns.
template <typename T>
class ThreadLocal {
 public:
  // Requires T to be default-constructible.
  ThreadLocal() : key_(CreateKey()),
                  default_() {}
  // Per-thread values are lazily copy-constructed from 'value'.
  explicit ThreadLocal(const T& value) : key_(CreateKey()),
                                         default_(value) {}

  ~ThreadLocal() {
    // Destroys the managed object for the current thread, if any.
    DeleteThreadLocalValue(pthread_getspecific(key_));

    // Releases resources associated with the key.  This will *not*
    // delete managed objects for other threads.
    GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
  }

  T* pointer() { return GetOrCreateValue(); }
  const T* pointer() const { return GetOrCreateValue(); }
  const T& get() const { return *pointer(); }
  void set(const T& value) { *pointer() = value; }

 private:
  // Holds a value of type T.
  class ValueHolder : public ThreadLocalValueHolderBase {
   public:
    explicit ValueHolder(const T& value) : value_(value) {}

    T* pointer() { return &value_; }

   private:
    T value_;
    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
  };

  static pthread_key_t CreateKey() {
    pthread_key_t key;
    // When a thread exits, DeleteThreadLocalValue() will be called on
    // the object managed for that thread.
    GTEST_CHECK_POSIX_SUCCESS_(
        pthread_key_create(&key, &DeleteThreadLocalValue));
    return key;
  }

  T* GetOrCreateValue() const {
    ThreadLocalValueHolderBase* const holder =
        static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
    if (holder != NULL) {
      return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
    }

    // First access on this thread: lazily create the per-thread copy
    // of default_ and register it with pthreads for cleanup.
    ValueHolder* const new_holder = new ValueHolder(default_);
    ThreadLocalValueHolderBase* const holder_base = new_holder;
    GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
    return new_holder->pointer();
  }

  // A key pthreads uses for looking up per-thread values.
  const pthread_key_t key_;
  const T default_;  // The default value for each thread.

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
};
+
+# define GTEST_IS_THREADSAFE 1
+
+#else // GTEST_HAS_PTHREAD
+
// A dummy implementation of synchronization primitives (mutex, lock,
// and thread-local variable). Necessary for compiling Google Test where
// mutex is not supported - using Google Test in multiple threads is not
// supported on such platforms.

class Mutex {
 public:
  Mutex() {}
  void Lock() {}
  void Unlock() {}
  // No-op: in a single-threaded build every "lock" is trivially held.
  void AssertHeld() const {}
};
+
// In the dummy implementation a "static mutex" needs no special
// initializer, so declaring/defining one reduces to a plain global.
# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
  extern ::testing::internal::Mutex mutex

# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex

// Lock guard whose constructor does nothing -- matches the no-op
// Mutex above.
class GTestMutexLock {
 public:
  explicit GTestMutexLock(Mutex*) {}  // NOLINT
};

typedef GTestMutexLock MutexLock;
+
// Single-threaded stand-in for ThreadLocal<T>: with no threads to
// distinguish, one plain T member satisfies the whole contract
// (pointer/get/set all refer to the same object).
template <typename T>
class ThreadLocal {
 public:
  ThreadLocal() : value_() {}
  explicit ThreadLocal(const T& initial) : value_(initial) {}

  T* pointer() { return &value_; }
  const T* pointer() const { return &value_; }

  const T& get() const { return value_; }
  void set(const T& new_value) { value_ = new_value; }

 private:
  T value_;  // The single "per-thread" slot.
};
+
+// The above synchronization primitives have dummy implementations.
+// Therefore Google Test is not thread-safe.
+# define GTEST_IS_THREADSAFE 0
+
+#endif // GTEST_HAS_PTHREAD
+
// Returns the number of threads running in the process, or 0 to indicate that
// we cannot detect it.
// NOTE(review): implementation lives outside this header; 0 appears to be
// the "unknown" sentinel per the comment above -- callers must handle it.
GTEST_API_ size_t GetThreadCount();
+
+// Passing non-POD classes through ellipsis (...) crashes the ARM
+// compiler and generates a warning in Sun Studio. The Nokia Symbian
+// and the IBM XL C/C++ compiler try to instantiate a copy constructor
+// for objects passed through ellipsis (...), failing for uncopyable
+// objects. We define this to ensure that only POD is passed through
+// ellipsis on these systems.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_ELLIPSIS_NEEDS_POD_ 1
+#else
+# define GTEST_CAN_COMPARE_NULL 1
+#endif
+
+// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
+// const T& and const T* in a function template. These compilers
+// _can_ decide between class template specializations for T and T*,
+// so a tr1::type_traits-like is_pointer works.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
+# define GTEST_NEEDS_IS_POINTER_ 1
+#endif
+
// Compile-time boolean constant: a pre-<type_traits> equivalent of
// std::integral_constant<bool, ...>.
template <bool bool_value>
struct bool_constant {
  typedef bool_constant<bool_value> type;
  static const bool value = bool_value;
};
// Out-of-class definition so 'value' can be ODR-used (e.g. bound to a
// const reference) under C++03 rules.
template <bool bool_value> const bool bool_constant<bool_value>::value;

typedef bool_constant<false> false_type;
typedef bool_constant<true> true_type;
+
// is_pointer<T> derives from true_type iff T is an object/function
// pointer type; used where overload resolution on const T&/const T*
// is unreliable (see GTEST_NEEDS_IS_POINTER_ above).
template <typename T>
struct is_pointer : public false_type {};

template <typename T>
struct is_pointer<T*> : public true_type {};
+
// Minimal stand-in for std::iterator_traits: exposes only value_type.
// The primary template forwards to the iterator's nested typedef.
template <typename Iterator>
struct IteratorTraits {
  typedef typename Iterator::value_type value_type;
};

// Specialization for raw pointers, which have no nested value_type.
template <typename T>
struct IteratorTraits<T*> {
  typedef T value_type;
};

// Specialization for pointer-to-const: value_type drops the const.
template <typename T>
struct IteratorTraits<const T*> {
  typedef T value_type;
};
+
#if GTEST_OS_WINDOWS
# define GTEST_PATH_SEP_ "\\"
# define GTEST_HAS_ALT_PATH_SEP_ 1
// The biggest signed integer type the compiler supports.
typedef __int64 BiggestInt;
#else
# define GTEST_PATH_SEP_ "/"
# define GTEST_HAS_ALT_PATH_SEP_ 0
// The biggest signed integer type the compiler supports.
typedef long long BiggestInt;  // NOLINT
#endif  // GTEST_OS_WINDOWS
+
// Utilities for char.

// isspace(int ch) and friends accept an unsigned char or EOF.  char
// may be signed, depending on the compiler (or compiler flags).
// Therefore we need to cast a char to unsigned char before calling
// isspace(), etc.

inline bool IsAlpha(char ch) {
  return 0 != isalpha(static_cast<unsigned char>(ch));
}
inline bool IsAlNum(char ch) {
  return 0 != isalnum(static_cast<unsigned char>(ch));
}
inline bool IsDigit(char ch) {
  return 0 != isdigit(static_cast<unsigned char>(ch));
}
inline bool IsLower(char ch) {
  return 0 != islower(static_cast<unsigned char>(ch));
}
inline bool IsSpace(char ch) {
  return 0 != isspace(static_cast<unsigned char>(ch));
}
inline bool IsUpper(char ch) {
  return 0 != isupper(static_cast<unsigned char>(ch));
}
inline bool IsXDigit(char ch) {
  return 0 != isxdigit(static_cast<unsigned char>(ch));
}
inline bool IsXDigit(wchar_t ch) {
  const unsigned char low_byte = static_cast<unsigned char>(ch);
  if (ch != low_byte) {
    return false;  // A wide char outside the low byte range cannot be a hex digit.
  }
  return 0 != isxdigit(low_byte);
}

inline char ToLower(char ch) {
  const int lowered = tolower(static_cast<unsigned char>(ch));
  return static_cast<char>(lowered);
}
inline char ToUpper(char ch) {
  const int raised = toupper(static_cast<unsigned char>(ch));
  return static_cast<char>(raised);
}
+
// The testing::internal::posix namespace holds wrappers for common
// POSIX functions.  These wrappers hide the differences between
// Windows/MSVC and POSIX systems.  Since some compilers define these
// standard functions as macros, the wrapper cannot have the same name
// as the wrapped function.

namespace posix {

// Functions with a different name on Windows.

#if GTEST_OS_WINDOWS

typedef struct _stat StatStruct;

# ifdef __BORLANDC__
inline int IsATTY(int fd) { return isatty(fd); }
inline int StrCaseCmp(const char* s1, const char* s2) {
  return stricmp(s1, s2);
}
inline char* StrDup(const char* src) { return strdup(src); }
# else  // !__BORLANDC__
#  if GTEST_OS_WINDOWS_MOBILE
// Windows CE has no console; report "not a TTY" unconditionally.
inline int IsATTY(int /* fd */) { return 0; }
#  else
inline int IsATTY(int fd) { return _isatty(fd); }
#  endif  // GTEST_OS_WINDOWS_MOBILE
inline int StrCaseCmp(const char* s1, const char* s2) {
  return _stricmp(s1, s2);
}
inline char* StrDup(const char* src) { return _strdup(src); }
# endif  // __BORLANDC__

# if GTEST_OS_WINDOWS_MOBILE
inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
// time and thus not defined there.
# else
inline int FileNo(FILE* file) { return _fileno(file); }
inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
inline int RmDir(const char* dir) { return _rmdir(dir); }
inline bool IsDir(const StatStruct& st) {
  return (_S_IFDIR & st.st_mode) != 0;
}
# endif  // GTEST_OS_WINDOWS_MOBILE

#else

typedef struct stat StatStruct;

inline int FileNo(FILE* file) { return fileno(file); }
inline int IsATTY(int fd) { return isatty(fd); }
inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
inline int StrCaseCmp(const char* s1, const char* s2) {
  return strcasecmp(s1, s2);
}
inline char* StrDup(const char* src) { return strdup(src); }
inline int RmDir(const char* dir) { return rmdir(dir); }
inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }

#endif  // GTEST_OS_WINDOWS

// Functions deprecated by MSVC 8.0.

#ifdef _MSC_VER
// Temporarily disable warning 4996 (deprecated function).
# pragma warning(push)
# pragma warning(disable:4996)
#endif

// NOTE: inherits strncpy()'s semantics -- the result is not guaranteed to
// be NUL-terminated when strlen(src) >= n.  Callers must terminate it.
inline const char* StrNCpy(char* dest, const char* src, size_t n) {
  return strncpy(dest, src, n);
}

// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
// StrError() aren't needed on Windows CE at this time and thus not
// defined there.

#if !GTEST_OS_WINDOWS_MOBILE
inline int ChDir(const char* dir) { return chdir(dir); }
#endif
inline FILE* FOpen(const char* path, const char* mode) {
  return fopen(path, mode);
}
#if !GTEST_OS_WINDOWS_MOBILE
inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
  return freopen(path, mode, stream);
}
inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
#endif
inline int FClose(FILE* fp) { return fclose(fp); }
#if !GTEST_OS_WINDOWS_MOBILE
inline int Read(int fd, void* buf, unsigned int count) {
  return static_cast<int>(read(fd, buf, count));
}
inline int Write(int fd, const void* buf, unsigned int count) {
  return static_cast<int>(write(fd, buf, count));
}
inline int Close(int fd) { return close(fd); }
inline const char* StrError(int errnum) { return strerror(errnum); }
#endif
// Returns the value of the environment variable, or NULL when it is
// unset (or, on some platforms, empty -- see below).
inline const char* GetEnv(const char* name) {
#if GTEST_OS_WINDOWS_MOBILE
  // We are on Windows CE, which has no environment variables.
  return NULL;
#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
  // Environment variables which we programmatically clear will be set to the
  // empty string rather than unset (NULL).  Handle that case.
  const char* const env = getenv(name);
  return (env != NULL && env[0] != '\0') ? env : NULL;
#else
  return getenv(name);
#endif
}

#ifdef _MSC_VER
# pragma warning(pop)  // Restores the warning state.
#endif

#if GTEST_OS_WINDOWS_MOBILE
// Windows CE has no C library. The abort() function is used in
// several places in Google Test. This implementation provides a reasonable
// imitation of standard behaviour.
void Abort();
#else
inline void Abort() { abort(); }
#endif  // GTEST_OS_WINDOWS_MOBILE

}  // namespace posix
+
// MSVC "deprecates" snprintf and issues warnings wherever it is used.  In
// order to avoid these warnings, we need to use _snprintf or _snprintf_s on
// MSVC-based platforms.  We map the GTEST_SNPRINTF_ macro to the appropriate
// function in order to achieve that.  We use macro definition here because
// snprintf is a variadic function.
#if defined(_MSC_VER) && _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
// MSVC 2005 and above support variadic macros.
# define GTEST_SNPRINTF_(buffer, size, format, ...) \
     _snprintf_s(buffer, size, size, format, __VA_ARGS__)
#elif defined(_MSC_VER)
// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't
// complain about _snprintf.
# define GTEST_SNPRINTF_ _snprintf
#else
# define GTEST_SNPRINTF_ snprintf
#endif
+
// The maximum number a BiggestInt can represent.  This definition
// works no matter BiggestInt is represented in one's complement or
// two's complement.
//
// We cannot rely on numeric_limits in STL, as __int64 and long long
// are not part of standard C++ and numeric_limits doesn't need to be
// defined for them.
//
// (Shifting 1 into the sign-bit position and complementing yields the
// all-ones-except-sign-bit pattern, i.e. the maximum positive value.)
const BiggestInt kMaxBiggestInt =
    ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
+
// This template class serves as a compile-time function from size to
// type.  It maps a size in bytes to a primitive type with that
// size. e.g.
//
//   TypeWithSize<4>::UInt
//
// is typedef-ed to be unsigned int (unsigned integer made up of 4
// bytes).
//
// Such functionality should belong to STL, but I cannot find it
// there.
//
// Google Test uses this class in the implementation of floating-point
// comparison.
//
// For now it only handles UInt (unsigned int) as that's all Google Test
// needs.  Other types can be easily added in the future if need
// arises.
template <size_t size>
class TypeWithSize {
 public:
  // This prevents the user from using TypeWithSize<N> with incorrect
  // values of N.  (Instantiating UInt for an unsupported N produces
  // 'void', which fails to compile when used as an integer type.)
  typedef void UInt;
};

// The specialization for size 4.
template <>
class TypeWithSize<4> {
 public:
  // unsigned int has size 4 in both gcc and MSVC.
  //
  // As base/basictypes.h doesn't compile on Windows, we cannot use
  // uint32, uint64, and etc here.
  typedef int Int;
  typedef unsigned int UInt;
};

// The specialization for size 8.
template <>
class TypeWithSize<8> {
 public:
#if GTEST_OS_WINDOWS
  typedef __int64 Int;
  typedef unsigned __int64 UInt;
#else
  typedef long long Int;  // NOLINT
  typedef unsigned long long UInt;  // NOLINT
#endif  // GTEST_OS_WINDOWS
};

// Integer types of known sizes.
typedef TypeWithSize<4>::Int Int32;
typedef TypeWithSize<4>::UInt UInt32;
typedef TypeWithSize<8>::Int Int64;
typedef TypeWithSize<8>::UInt UInt64;
typedef TypeWithSize<8>::Int TimeInMillis;  // Represents time in milliseconds.
+
// Utilities for command line flags and environment variables.

// Macro for referencing flags.  All Google Test flags share the
// FLAGS_gtest_ prefix.
#define GTEST_FLAG(name) FLAGS_gtest_##name

// Macros for declaring flags.
#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
#define GTEST_DECLARE_int32_(name) \
    GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
#define GTEST_DECLARE_string_(name) \
    GTEST_API_ extern ::std::string GTEST_FLAG(name)

// Macros for defining flags.  'doc' is accepted for symmetry with
// other flag libraries but is not used by the expansion.
#define GTEST_DEFINE_bool_(name, default_val, doc) \
    GTEST_API_ bool GTEST_FLAG(name) = (default_val)
#define GTEST_DEFINE_int32_(name, default_val, doc) \
    GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
#define GTEST_DEFINE_string_(name, default_val, doc) \
    GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)

// Thread annotations: defined as no-ops here.
#define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
#define GTEST_LOCK_EXCLUDED_(locks)
+
// Parses 'str' for a 32-bit signed integer.  If successful, writes the result
// to *value and returns true; otherwise leaves *value unchanged and returns
// false.
// TODO(chandlerc): Find a better way to refactor flag and environment parsing
// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
// function.
bool ParseInt32(const Message& src_text, const char* str, Int32* value);

// Parses a bool/Int32/string from the environment variable
// corresponding to the given Google Test flag.  'default_val' is the
// value returned when the variable is absent (or unparsable).
bool BoolFromGTestEnv(const char* flag, bool default_val);
GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
const char* StringFromGTestEnv(const char* flag, const char* default_val);
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+#if GTEST_OS_LINUX
+# include <stdlib.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#include <ctype.h>
+#include <float.h>
+#include <string.h>
+#include <iomanip>
+#include <limits>
+#include <set>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the Message class.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+
+#include <limits>
+
+
// Ensures that there is at least one operator<< in the global namespace.
// See Message& operator<<(...) below for why.
// (This overload is never meant to be called; it only makes the
// 'using ::operator <<;' declaration inside Message well-formed.)
void operator<<(const testing::internal::Secret&, int);
+
+namespace testing {
+
// The Message class works like an ostream repeater.
//
// Typical usage:
//
//   1. You stream a bunch of values to a Message object.
//      It will remember the text in a stringstream.
//   2. Then you stream the Message object to an ostream.
//      This causes the text in the Message to be streamed
//      to the ostream.
//
// For example;
//
//   testing::Message foo;
//   foo << 1 << " != " << 2;
//   std::cout << foo;
//
// will print "1 != 2".
//
// Message is not intended to be inherited from.  In particular, its
// destructor is not virtual.
//
// Note that stringstream behaves differently in gcc and in MSVC.  You
// can stream a NULL char pointer to it in the former, but not in the
// latter (it causes an access violation if you do).  The Message
// class hides this difference by treating a NULL char pointer as
// "(null)".
class GTEST_API_ Message {
 private:
  // The type of basic IO manipulators (endl, ends, and flush) for
  // narrow streams.
  typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);

 public:
  // Constructs an empty Message.
  Message();

  // Copy constructor.  Copies the accumulated text, not the stream
  // object itself.
  Message(const Message& msg) : ss_(new ::std::stringstream) {  // NOLINT
    *ss_ << msg.GetString();
  }

  // Constructs a Message from a C-string.
  explicit Message(const char* str) : ss_(new ::std::stringstream) {
    *ss_ << str;
  }

#if GTEST_OS_SYMBIAN
  // Streams a value (either a pointer or not) to this object.
  // Dispatches on is_pointer<T> so NULL pointers get the "(null)"
  // treatment (see StreamHelper below).
  template <typename T>
  inline Message& operator <<(const T& value) {
    StreamHelper(typename internal::is_pointer<T>::type(), value);
    return *this;
  }
#else
  // Streams a non-pointer value to this object.
  template <typename T>
  inline Message& operator <<(const T& val) {
    // Some libraries overload << for STL containers.  These
    // overloads are defined in the global namespace instead of ::std.
    //
    // C++'s symbol lookup rule (i.e. Koenig lookup) says that these
    // overloads are visible in either the std namespace or the global
    // namespace, but not other namespaces, including the testing
    // namespace which Google Test's Message class is in.
    //
    // To allow STL containers (and other types that has a << operator
    // defined in the global namespace) to be used in Google Test
    // assertions, testing::Message must access the custom << operator
    // from the global namespace.  With this using declaration,
    // overloads of << defined in the global namespace and those
    // visible via Koenig lookup are both exposed in this function.
    using ::operator <<;
    *ss_ << val;
    return *this;
  }

  // Streams a pointer value to this object.
  //
  // This function is an overload of the previous one.  When you
  // stream a pointer to a Message, this definition will be used as it
  // is more specialized.  (The C++ Standard, section
  // [temp.func.order].)  If you stream a non-pointer, then the
  // previous definition will be used.
  //
  // The reason for this overload is that streaming a NULL pointer to
  // ostream is undefined behavior.  Depending on the compiler, you
  // may get "0", "(nil)", "(null)", or an access violation.  To
  // ensure consistent result across compilers, we always treat NULL
  // as "(null)".
  template <typename T>
  inline Message& operator <<(T* const& pointer) {  // NOLINT
    if (pointer == NULL) {
      *ss_ << "(null)";
    } else {
      *ss_ << pointer;
    }
    return *this;
  }
#endif  // GTEST_OS_SYMBIAN

  // Since the basic IO manipulators are overloaded for both narrow
  // and wide streams, we have to provide this specialized definition
  // of operator <<, even though its body is the same as the
  // templatized version above.  Without this definition, streaming
  // endl or other basic IO manipulators to Message will confuse the
  // compiler.
  Message& operator <<(BasicNarrowIoManip val) {
    *ss_ << val;
    return *this;
  }

  // Instead of 1/0, we want to see true/false for bool values.
  Message& operator <<(bool b) {
    return *this << (b ? "true" : "false");
  }

  // These two overloads allow streaming a wide C string to a Message
  // using the UTF-8 encoding.
  Message& operator <<(const wchar_t* wide_c_str);
  Message& operator <<(wchar_t* wide_c_str);

#if GTEST_HAS_STD_WSTRING
  // Converts the given wide string to a narrow string using the UTF-8
  // encoding, and streams the result to this Message object.
  Message& operator <<(const ::std::wstring& wstr);
#endif  // GTEST_HAS_STD_WSTRING

#if GTEST_HAS_GLOBAL_WSTRING
  // Converts the given wide string to a narrow string using the UTF-8
  // encoding, and streams the result to this Message object.
  Message& operator <<(const ::wstring& wstr);
#endif  // GTEST_HAS_GLOBAL_WSTRING

  // Gets the text streamed to this object so far as an std::string.
  // Each '\0' character in the buffer is replaced with "\\0".
  //
  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
  std::string GetString() const;

 private:

#if GTEST_OS_SYMBIAN
  // These are needed as the Nokia Symbian Compiler cannot decide between
  // const T& and const T* in a function template. The Nokia compiler _can_
  // decide between class template specializations for T and T*, so a
  // tr1::type_traits-like is_pointer works, and we can overload on that.
  template <typename T>
  inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) {
    if (pointer == NULL) {
      *ss_ << "(null)";
    } else {
      *ss_ << pointer;
    }
  }
  template <typename T>
  inline void StreamHelper(internal::false_type /*is_pointer*/,
                           const T& value) {
    // See the comments in Message& operator <<(const T&) above for why
    // we need this using statement.
    using ::operator <<;
    *ss_ << value;
  }
#endif  // GTEST_OS_SYMBIAN

  // We'll hold the text streamed to this object here.
  const internal::scoped_ptr< ::std::stringstream> ss_;

  // We declare (but don't implement) this to prevent the compiler
  // from implementing the assignment operator.
  void operator=(const Message&);
};
+
+// Streams a Message to an ostream.
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
+ return os << sb.GetString();
+}
+
+namespace internal {
+
// Converts a streamable value to an std::string.  A NULL pointer is
// converted to "(null)".  When the input value is a ::string,
// ::std::string, ::wstring, or ::std::wstring object, each NUL
// character in it is replaced with "\\0".
// (Delegates all formatting rules to Message's operator<<.)
template <typename T>
std::string StreamableToString(const T& streamable) {
  return (Message() << streamable).GetString();
}
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test. They are subject to change without notice. They should not used
+// by code external to Google Test.
+//
+// This header file is #included by <gtest/internal/gtest-internal.h>.
+// It should not be #included by other files.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+# include <mem.h>
+#endif
+
+#include <string.h>
+#include <string>
+
+
+namespace testing {
+namespace internal {
+
// String - an abstract class holding static string utilities.
// All members are static; the private constructor (bottom of the
// class) makes it non-instantiable.
class GTEST_API_ String {
 public:
  // Static utility methods

  // Clones a 0-terminated C string, allocating memory using new.  The
  // caller is responsible for deleting the return value using
  // delete[].  Returns the cloned string, or NULL if the input is
  // NULL.
  //
  // This is different from strdup() in string.h, which allocates
  // memory using malloc().
  static const char* CloneCString(const char* c_str);

#if GTEST_OS_WINDOWS_MOBILE
  // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
  // able to pass strings to Win32 APIs on CE we need to convert them
  // to 'Unicode', UTF-16.

  // Creates a UTF-16 wide string from the given ANSI string, allocating
  // memory using new. The caller is responsible for deleting the return
  // value using delete[]. Returns the wide string, or NULL if the
  // input is NULL.
  //
  // The wide string is created using the ANSI codepage (CP_ACP) to
  // match the behaviour of the ANSI versions of Win32 calls and the
  // C runtime.
  static LPCWSTR AnsiToUtf16(const char* c_str);

  // Creates an ANSI string from the given wide string, allocating
  // memory using new. The caller is responsible for deleting the return
  // value using delete[]. Returns the ANSI string, or NULL if the
  // input is NULL.
  //
  // The returned string is created using the ANSI codepage (CP_ACP) to
  // match the behaviour of the ANSI versions of Win32 calls and the
  // C runtime.
  static const char* Utf16ToAnsi(LPCWSTR utf16_str);
#endif

  // Compares two C strings.  Returns true iff they have the same content.
  //
  // Unlike strcmp(), this function can handle NULL argument(s).  A
  // NULL C string is considered different to any non-NULL C string,
  // including the empty string.
  static bool CStringEquals(const char* lhs, const char* rhs);

  // Converts a wide C string to a String using the UTF-8 encoding.
  // NULL will be converted to "(null)".  If an error occurred during
  // the conversion, "(failed to convert from wide string)" is
  // returned.
  static std::string ShowWideCString(const wchar_t* wide_c_str);

  // Compares two wide C strings.  Returns true iff they have the same
  // content.
  //
  // Unlike wcscmp(), this function can handle NULL argument(s).  A
  // NULL C string is considered different to any non-NULL C string,
  // including the empty string.
  static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);

  // Compares two C strings, ignoring case.  Returns true iff they
  // have the same content.
  //
  // Unlike strcasecmp(), this function can handle NULL argument(s).
  // A NULL C string is considered different to any non-NULL C string,
  // including the empty string.
  static bool CaseInsensitiveCStringEquals(const char* lhs,
                                           const char* rhs);

  // Compares two wide C strings, ignoring case.  Returns true iff they
  // have the same content.
  //
  // Unlike wcscasecmp(), this function can handle NULL argument(s).
  // A NULL C string is considered different to any non-NULL wide C string,
  // including the empty string.
  // NB: The implementations on different platforms slightly differ.
  // On windows, this method uses _wcsicmp which compares according to LC_CTYPE
  // environment variable. On GNU platform this method uses wcscasecmp
  // which compares according to LC_CTYPE category of the current locale.
  // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
  // current locale.
  static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
                                               const wchar_t* rhs);

  // Returns true iff the given string ends with the given suffix, ignoring
  // case. Any string is considered to end with an empty suffix.
  static bool EndsWithCaseInsensitive(
      const std::string& str, const std::string& suffix);

  // Formats an int value as "%02d".
  static std::string FormatIntWidth2(int value);  // "%02d" for width == 2

  // Formats an int value as "%X".
  static std::string FormatHexInt(int value);

  // Formats a byte as "%02X".
  static std::string FormatByte(unsigned char value);

 private:
  String();  // Not meant to be instantiated.
};  // class String
+
+// Gets the content of the stringstream's buffer as an std::string. Each '\0'
+// character in the buffer is replaced with "\\0".
+GTEST_API_ std::string StringStreamToString(::std::stringstream* stream);
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keith.ray@gmail.com (Keith Ray)
+//
+// Google Test filepath utilities
+//
+// This header file declares classes and functions used internally by
+// Google Test. They are subject to change without notice.
+//
+// This file is #included in <gtest/internal/gtest-internal.h>.
+// Do not include this header file separately!
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+
+
+namespace testing {
+namespace internal {
+
+// FilePath - a class for file and directory pathname manipulation which
+// handles platform-specific conventions (like the pathname separator).
+// Used for helper functions for naming files in a directory for xml output.
+// Except for Set methods, all methods are const or static, which provides an
+// "immutable value object" -- useful for peace of mind.
+// A FilePath with a value ending in a path separator ("like/this/") represents
+// a directory, otherwise it is assumed to represent a file. In either case,
+// it may or may not represent an actual file or directory in the file system.
+// Names are NOT checked for syntax correctness -- no checking for illegal
+// characters, malformed paths, etc.
+
+class GTEST_API_ FilePath {
+ public:
+ // Creates an empty path ("").
+ FilePath() : pathname_("") { }
+ // Copy constructor. rhs is already normalized, so no re-normalization.
+ FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
+
+ // Constructs from a raw pathname string and normalizes it (collapses
+ // repeated separators; see Normalize() below).
+ explicit FilePath(const std::string& pathname) : pathname_(pathname) {
+ Normalize();
+ }
+
+ // Copy assignment, implemented in terms of Set().
+ FilePath& operator=(const FilePath& rhs) {
+ Set(rhs);
+ return *this;
+ }
+
+ // Replaces the contents of this FilePath with those of rhs.
+ void Set(const FilePath& rhs) {
+ pathname_ = rhs.pathname_;
+ }
+
+ // Read-only accessors for the underlying (already normalized) pathname.
+ const std::string& string() const { return pathname_; }
+ const char* c_str() const { return pathname_.c_str(); }
+
+ // Returns the current working directory, or "" if unsuccessful.
+ static FilePath GetCurrentDir();
+
+ // Given directory = "dir", base_name = "test", number = 0,
+ // extension = "xml", returns "dir/test.xml". If number is greater
+ // than zero (e.g., 12), returns "dir/test_12.xml".
+ // On Windows platform, uses \ as the separator rather than /.
+ static FilePath MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension);
+
+ // Given directory = "dir", relative_path = "test.xml",
+ // returns "dir/test.xml".
+ // On Windows, uses \ as the separator rather than /.
+ static FilePath ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path);
+
+ // Returns a pathname for a file that does not currently exist. The pathname
+ // will be directory/base_name.extension or
+ // directory/base_name_<number>.extension if directory/base_name.extension
+ // already exists. The number will be incremented until a pathname is found
+ // that does not already exist.
+ // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+ // There could be a race condition if two or more processes are calling this
+ // function at the same time -- they could both pick the same filename.
+ static FilePath GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension);
+
+ // Returns true iff the path is "".
+ bool IsEmpty() const { return pathname_.empty(); }
+
+ // If input name has a trailing separator character, removes it and returns
+ // the name, otherwise return the name string unmodified.
+ // On Windows platform, uses \ as the separator, other platforms use /.
+ FilePath RemoveTrailingPathSeparator() const;
+
+ // Returns a copy of the FilePath with the directory part removed.
+ // Example: FilePath("path/to/file").RemoveDirectoryName() returns
+ // FilePath("file"). If there is no directory part ("just_a_file"), it returns
+ // the FilePath unmodified. If there is no file part ("just_a_dir/") it
+ // returns an empty FilePath ("").
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveDirectoryName() const;
+
+ // RemoveFileName returns the directory path with the filename removed.
+ // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+ // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+ // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+ // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveFileName() const;
+
+ // Returns a copy of the FilePath with the case-insensitive extension removed.
+ // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+ // FilePath("dir/file"). If a case-insensitive extension is not
+ // found, returns a copy of the original FilePath.
+ FilePath RemoveExtension(const char* extension) const;
+
+ // Creates directories so that path exists. Returns true if successful or if
+ // the directories already exist; returns false if unable to create
+ // directories for any reason. Will also return false if the FilePath does
+ // not represent a directory (that is, it doesn't end with a path separator).
+ bool CreateDirectoriesRecursively() const;
+
+ // Create the directory so that path exists. Returns true if successful or
+ // if the directory already exists; returns false if unable to create the
+ // directory for any reason, including if the parent directory does not
+ // exist. Not named "CreateDirectory" because that's a macro on Windows.
+ bool CreateFolder() const;
+
+ // Returns true if FilePath describes something in the file-system,
+ // either a file, directory, or whatever, and that something exists.
+ bool FileOrDirectoryExists() const;
+
+ // Returns true if pathname describes a directory in the file-system
+ // that exists.
+ bool DirectoryExists() const;
+
+ // Returns true if FilePath ends with a path separator, which indicates that
+ // it is intended to represent a directory. Returns false otherwise.
+ // This does NOT check that a directory (or file) actually exists.
+ bool IsDirectory() const;
+
+ // Returns true if pathname describes a root directory. (Windows has one
+ // root directory per disk drive.)
+ bool IsRootDirectory() const;
+
+ // Returns true if pathname describes an absolute path.
+ bool IsAbsolutePath() const;
+
+ private:
+ // Replaces multiple consecutive separators with a single separator.
+ // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+ // redundancies that might be in a pathname involving "." or "..".
+ //
+ // A pathname with multiple consecutive separators may occur either through
+ // user error or as a result of some scripts or APIs that generate a pathname
+ // with a trailing separator. On other platforms the same API or script
+ // may NOT generate a pathname with a trailing "/". Then elsewhere that
+ // pathname may have another "/" and pathname components added to it,
+ // without checking for the separator already being there.
+ // The script language and operating system may allow paths like "foo//bar"
+ // but some of the functions in FilePath will not handle that correctly. In
+ // particular, RemoveTrailingPathSeparator() only removes one separator, and
+ // it is called in CreateDirectoriesRecursively() assuming that it will change
+ // a pathname from directory syntax (trailing separator) to filename syntax.
+ //
+ // On Windows this method also replaces the alternate path separator '/' with
+ // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+ // "bar\\foo".
+
+ void Normalize();
+
+ // Returns a pointer to the last occurrence of a valid path separator in
+ // the FilePath. On Windows, for example, both '/' and '\' are valid path
+ // separators. Returns NULL if no path separator was found.
+ const char* FindLastPathSeparator() const;
+
+ // The normalized pathname this object represents.
+ std::string pathname_;
+}; // class FilePath
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+// This file was GENERATED by command:
+// pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently we support at most 50 types in a list, and at most 50
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+
+// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+# if GTEST_HAS_CXXABI_H_
+# include <cxxabi.h>
+# elif defined(__HP_aCC)
+# include <acxx_demangle.h>
+# endif // GTEST_HAS_CXXABI_H_
+
+namespace testing {
+namespace internal {
+
+// GetTypeName<T>() returns a human-readable name of type T.
+// NB: This function is also used in Google Mock, so don't move it inside of
+// the typed-test-only section below.
+template <typename T>
+std::string GetTypeName() {
+# if GTEST_HAS_RTTI
+
+ const char* const name = typeid(T).name();
+# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)
+ int status = 0;
+ // gcc's implementation of typeid(T).name() mangles the type name,
+ // so we have to demangle it.
+# if GTEST_HAS_CXXABI_H_
+ using abi::__cxa_demangle;
+# endif // GTEST_HAS_CXXABI_H_
+ char* const readable_name = __cxa_demangle(name, 0, 0, &status);
+ // __cxa_demangle sets status to a non-zero value on failure, in which
+ // case we fall back to the (mangled) name from typeid.
+ const std::string name_str(status == 0 ? readable_name : name);
+ // free(NULL) is a no-op, so this is safe even when demangling failed
+ // and readable_name is NULL.
+ free(readable_name);
+ return name_str;
+# else
+ // No demangler available on this platform; return the raw typeid name.
+ return name;
+# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC
+
+# else
+
+ // Without RTTI there is no way to recover the type's name at runtime.
+ return "<type>";
+
+# endif // GTEST_HAS_RTTI
+}
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// AssertTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// type. This can be used as a compile-time assertion to ensure that
+// two types are equal.
+
+// Primary template is declared but never defined: instantiating it with
+// two distinct types is a compile error.
+template <typename T1, typename T2>
+struct AssertTypeEq;
+
+// Specialization for T1 == T2; only this case provides the 'type' member.
+template <typename T>
+struct AssertTypeEq<T, T> {
+ typedef bool type;
+};
+
+// A unique type used as the default value for the arguments of class
+// template Types. This allows us to simulate variadic templates
+// (e.g. Types<int>, Types<int, double>, and etc), which C++ doesn't
+// support directly.
+struct None {};
+
+// The following family of struct and struct templates are used to
+// represent type lists. In particular, TypesN<T1, T2, ..., TN>
+// represents a type list with N types (T1, T2, ..., and TN) in it.
+// Except for Types0, every struct in the family has two member types:
+// Head for the first type in the list, and Tail for the rest of the
+// list.
+
+// The empty type list.
+struct Types0 {};
+
+// Type lists of length 1, 2, 3, and so on.
+
+template <typename T1>
+struct Types1 {
+ typedef T1 Head;
+ typedef Types0 Tail;
+};
+template <typename T1, typename T2>
+struct Types2 {
+ typedef T1 Head;
+ typedef Types1<T2> Tail;
+};
+
+template <typename T1, typename T2, typename T3>
+struct Types3 {
+ typedef T1 Head;
+ typedef Types2<T2, T3> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types4 {
+ typedef T1 Head;
+ typedef Types3<T2, T3, T4> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types5 {
+ typedef T1 Head;
+ typedef Types4<T2, T3, T4, T5> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types6 {
+ typedef T1 Head;
+ typedef Types5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types7 {
+ typedef T1 Head;
+ typedef Types6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types8 {
+ typedef T1 Head;
+ typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types9 {
+ typedef T1 Head;
+ typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types10 {
+ typedef T1 Head;
+ typedef Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types11 {
+ typedef T1 Head;
+ typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types12 {
+ typedef T1 Head;
+ typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types13 {
+ typedef T1 Head;
+ typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types14 {
+ typedef T1 Head;
+ typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types15 {
+ typedef T1 Head;
+ typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types16 {
+ typedef T1 Head;
+ typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types17 {
+ typedef T1 Head;
+ typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types18 {
+ typedef T1 Head;
+ typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types19 {
+ typedef T1 Head;
+ typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types20 {
+ typedef T1 Head;
+ typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types21 {
+ typedef T1 Head;
+ typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types22 {
+ typedef T1 Head;
+ typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types23 {
+ typedef T1 Head;
+ typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types24 {
+ typedef T1 Head;
+ typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types25 {
+ typedef T1 Head;
+ typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types26 {
+ typedef T1 Head;
+ typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types27 {
+ typedef T1 Head;
+ typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types28 {
+ typedef T1 Head;
+ typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types29 {
+ typedef T1 Head;
+ typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types30 {
+ typedef T1 Head;
+ typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types31 {
+ typedef T1 Head;
+ typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types32 {
+ typedef T1 Head;
+ typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types33 {
+ typedef T1 Head;
+ typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types34 {
+ typedef T1 Head;
+ typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types35 {
+ typedef T1 Head;
+ typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types36 {
+ typedef T1 Head;
+ typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types37 {
+ typedef T1 Head;
+ typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+// NOTE(review): auto-generated Google Test boilerplate (appears to be
+// pump.py output from gtest-type-util.h.pump -- confirm before hand-editing;
+// manual changes would be lost on regeneration).
+//
+// Each TypesN<T1, ..., TN> is one node of a compile-time cons-list of
+// types: 'Head' is the first type, and 'Tail' is the Types(N-1) list
+// holding the remaining N-1 types. Types38 .. Types50 below extend the
+// same pattern one element at a time up to the 50-type maximum.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types38 {
+ typedef T1 Head;
+ typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types39 {
+ typedef T1 Head;
+ typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types40 {
+ typedef T1 Head;
+ typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types41 {
+ typedef T1 Head;
+ typedef Types40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types42 {
+ typedef T1 Head;
+ typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types43 {
+ typedef T1 Head;
+ typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types44 {
+ typedef T1 Head;
+ typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types45 {
+ typedef T1 Head;
+ typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types46 {
+ typedef T1 Head;
+ typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types47 {
+ typedef T1 Head;
+ typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types48 {
+ typedef T1 Head;
+ typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types49 {
+ typedef T1 Head;
+ typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct Types50 {
+ typedef T1 Head;
+ typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+} // namespace internal
+
+// We don't want to require the users to write TypesN<...> directly,
+// as that would require them to count the length. Types<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Types<int>
+// will appear as Types<int, None, None, ..., None> in the compiler
+// errors).
+//
+// Our solution is to combine the best part of the two approaches: a
+// user would write Types<T1, ..., TN>, and Google Test will translate
+// that to TypesN<T1, ..., TN> internally to make error messages
+// readable. The translation is done by the 'type' member of the
+// Types template.
+// Front-end type list: all 50 parameters default to internal::None, so
+// users can write Types<int, char> without counting arguments. The 'type'
+// member maps the argument list onto the fully-specified internal::Types50;
+// the partial specializations that follow strip the trailing None defaults
+// down to the exact-length internal::TypesN (see comment above).
+template <typename T1 = internal::None, typename T2 = internal::None,
+ typename T3 = internal::None, typename T4 = internal::None,
+ typename T5 = internal::None, typename T6 = internal::None,
+ typename T7 = internal::None, typename T8 = internal::None,
+ typename T9 = internal::None, typename T10 = internal::None,
+ typename T11 = internal::None, typename T12 = internal::None,
+ typename T13 = internal::None, typename T14 = internal::None,
+ typename T15 = internal::None, typename T16 = internal::None,
+ typename T17 = internal::None, typename T18 = internal::None,
+ typename T19 = internal::None, typename T20 = internal::None,
+ typename T21 = internal::None, typename T22 = internal::None,
+ typename T23 = internal::None, typename T24 = internal::None,
+ typename T25 = internal::None, typename T26 = internal::None,
+ typename T27 = internal::None, typename T28 = internal::None,
+ typename T29 = internal::None, typename T30 = internal::None,
+ typename T31 = internal::None, typename T32 = internal::None,
+ typename T33 = internal::None, typename T34 = internal::None,
+ typename T35 = internal::None, typename T36 = internal::None,
+ typename T37 = internal::None, typename T38 = internal::None,
+ typename T39 = internal::None, typename T40 = internal::None,
+ typename T41 = internal::None, typename T42 = internal::None,
+ typename T43 = internal::None, typename T44 = internal::None,
+ typename T45 = internal::None, typename T46 = internal::None,
+ typename T47 = internal::None, typename T48 = internal::None,
+ typename T49 = internal::None, typename T50 = internal::None>
+struct Types {
+ typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
+
+// Auto-generated partial specializations of Types for 0 .. 21 explicit
+// type arguments (the generated file continues the pattern up to 50;
+// the remainder falls outside this hunk). Each specialization matches a
+// call with exactly N non-None arguments -- the rest pinned to
+// internal::None -- and forwards to the exact-length internal::TypesN,
+// which keeps compiler diagnostics free of the 50-slot default noise.
+template <>
+struct Types<internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types0 type;
+};
+template <typename T1>
+struct Types<T1, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types1<T1> type;
+};
+template <typename T1, typename T2>
+struct Types<T1, T2, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types2<T1, T2> type;
+};
+template <typename T1, typename T2, typename T3>
+struct Types<T1, T2, T3, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types3<T1, T2, T3> type;
+};
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types4<T1, T2, T3, T4> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types<T1, T2, T3, T4, T5, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types5<T1, T2, T3, T4, T5> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, internal::None, internal::None, internal::None> {
+ typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, internal::None, internal::None> {
+ typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, T49, internal::None> {
+ typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+namespace internal {
+
+# define GTEST_TEMPLATE_ template <typename T> class
+
+// The template "selector" struct TemplateSel<Tmpl> is used to
+// represent Tmpl, which must be a class template with one type
+// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
+// as the type Tmpl<T>. This allows us to actually instantiate the
+// template "selected" by TemplateSel<Tmpl>.
+//
+// This trick is necessary for simulating typedef for class templates,
+// which C++ doesn't support directly.
+template <GTEST_TEMPLATE_ Tmpl>
+struct TemplateSel {
+ template <typename T>
+ struct Bind {
+ typedef Tmpl<T> type;
+ };
+};
+
+# define GTEST_BIND_(TmplSel, T) \
+ TmplSel::template Bind<T>::type
+
+// A unique struct template used as the default value for the
+// arguments of class template Templates. This allows us to simulate
+// variadic templates (e.g. Templates<int>, Templates<int, double>,
+// and etc), which C++ doesn't support directly.
+template <typename T>
+struct NoneT {};
+
+// The following family of struct and struct templates are used to
+// represent template lists. In particular, TemplatesN<T1, T2, ...,
+// TN> represents a list of N templates (T1, T2, ..., and TN). Except
+// for Templates0, every struct in the family has two member types:
+// Head for the selector of the first template in the list, and Tail
+// for the rest of the list.
+
+// The empty template list.
+struct Templates0 {};
+
+// Template lists of length 1, 2, 3, and so on.
+
+template <GTEST_TEMPLATE_ T1>
+struct Templates1 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates0 Tail;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates2 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates1<T2> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates3 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates2<T2, T3> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates4 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates3<T2, T3, T4> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates5 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates4<T2, T3, T4, T5> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates6 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates7 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates8 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates9 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates10 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates11 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates12 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates13 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates14 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates15 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates16 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates17 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates18 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates19 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates20 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates21 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates22 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates23 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates24 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates25 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates26 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates27 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates28 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates29 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates30 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates31 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates32 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates33 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates34 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates35 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates36 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates37 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates38 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates39 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates40 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates41 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates42 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates43 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates44 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates45 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates46 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates47 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates48 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates49 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>
+struct Templates50 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+// We don't want to require the users to write TemplatesN<...> directly,
+// as that would require them to count the length. Templates<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Templates<list>
+// will appear as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler
+// errors).
+//
+// Our solution is to combine the best part of the two approaches: a
+// user would write Templates<T1, ..., TN>, and Google Test will translate
+// that to TemplatesN<T1, ..., TN> internally to make error messages
+// readable. The translation is done by the 'type' member of the
+// Templates template.
+template <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,
+ GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,
+ GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,
+ GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,
+ GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,
+ GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,
+ GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,
+ GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,
+ GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,
+ GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,
+ GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,
+ GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,
+ GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,
+ GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,
+ GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,
+ GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,
+ GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,
+ GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,
+ GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,
+ GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,
+ GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,
+ GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,
+ GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,
+ GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,
+ GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>
+struct Templates {
+ typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
+
+template <>
+struct Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates0 type;
+};
+template <GTEST_TEMPLATE_ T1>
+struct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates1<T1> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates2<T1, T2> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates3<T1, T2, T3> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates<T1, T2, T3, T4, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates4<T1, T2, T3, T4> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates5<T1, T2, T3, T4, T5> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates6<T1, T2, T3, T4, T5, T6> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, NoneT, NoneT, NoneT> {
+ typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, NoneT, NoneT> {
+ typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, T49, NoneT> {
+ typedef Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+// The TypeList template makes it possible to use either a single type
+// or a Types<...> list in TYPED_TEST_CASE() and
+// INSTANTIATE_TYPED_TEST_CASE_P().
+
+template <typename T>
+struct TypeList {
+ typedef Types1<T> type;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> > {
+ typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+// Due to C++ preprocessor weirdness, we need double indirection to
+// concatenate two tokens when one of them is __LINE__. Writing
+//
+// foo ## __LINE__
+//
+// will result in the token foo__LINE__, instead of foo followed by
+// the current line number. For more details, see
+// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
+#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
+#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
+
+class ProtocolMessage;
+namespace proto2 { class Message; }
+
+namespace testing {
+
+// Forward declarations.
+
+class AssertionResult; // Result of an assertion.
+class Message; // Represents a failure message.
+class Test; // Represents a test.
+class TestInfo; // Information about a test.
+class TestPartResult; // Result of a test part.
+class UnitTest; // A collection of test cases.
+
+template <typename T>
+::std::string PrintToString(const T& value);
+
+namespace internal {
+
+struct TraceInfo; // Information about a trace point.
+class ScopedTrace; // Implements scoped trace.
+class TestInfoImpl; // Opaque implementation of TestInfo
+class UnitTestImpl; // Opaque implementation of UnitTest
+
+// How many times InitGoogleTest() has been called.
+GTEST_API_ extern int g_init_gtest_count;
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+GTEST_API_ extern const char kStackTraceMarker[];
+
+// Two overloaded helpers for checking at compile time whether an
+// expression is a null pointer literal (i.e. NULL or any 0-valued
+// compile-time integral constant). Their return values have
+// different sizes, so we can use sizeof() to test which version is
+// picked by the compiler. These helpers have no implementations, as
+// we only need their signatures.
+//
+// Given IsNullLiteralHelper(x), the compiler will pick the first
+// version if x can be implicitly converted to Secret*, and pick the
+// second version otherwise. Since Secret is a secret and incomplete
+// type, the only expression a user can write that has type Secret* is
+// a null pointer literal. Therefore, we know that x is a null
+// pointer literal if and only if the first version is picked by the
+// compiler.
+char IsNullLiteralHelper(Secret* p);
+char (&IsNullLiteralHelper(...))[2]; // NOLINT
+
+// A compile-time bool constant that is true if and only if x is a
+// null pointer literal (i.e. NULL or any 0-valued compile-time
+// integral constant).
+#ifdef GTEST_ELLIPSIS_NEEDS_POD_
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_IS_NULL_LITERAL_(x) false
+#else
+# define GTEST_IS_NULL_LITERAL_(x) \
+ (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
+#endif // GTEST_ELLIPSIS_NEEDS_POD_
+
+// Appends the user-supplied message to the Google-Test-generated message.
+GTEST_API_ std::string AppendUserMessage(
+ const std::string& gtest_msg, const Message& user_msg);
+
+#if GTEST_HAS_EXCEPTIONS
+
+// This exception is thrown by (and only by) a failed Google Test
+// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions
+// are enabled). We derive it from std::runtime_error, which is for
+// errors presumably detectable only at run time. Since
+// std::runtime_error inherits from std::exception, many testing
+// frameworks know how to extract and print the message inside it.
+
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4275) // Temporarily disables warning 4275.
+#endif // _MSC_VER
+
+class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error {
+ public:
+ explicit GoogleTestFailureException(const TestPartResult& failure);
+};
+
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif // _MSC_VER
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+// A helper class for creating scoped traces in user programs.
+class GTEST_API_ ScopedTrace {
+ public:
+ // The c'tor pushes the given source file location and message onto
+ // a trace stack maintained by Google Test.
+ ScopedTrace(const char* file, int line, const Message& message);
+
+ // The d'tor pops the info pushed by the c'tor.
+ //
+ // Note that the d'tor is not virtual in order to be efficient.
+ // Don't inherit from ScopedTrace!
+ ~ScopedTrace();
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
+} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its
+ // c'tor and d'tor. Therefore it doesn't
+ // need to be used otherwise.
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// expected_expression: "foo"
+// actual_expression: "bar"
+// expected_value: "5"
+// actual_value: "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const std::string& expected_value,
+ const std::string& actual_value,
+ bool ignoring_case);
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+GTEST_API_ std::string GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value);
+
+// This template class represents an IEEE floating-point number
+// (either single-precision or double-precision, depending on the
+// template parameters).
+//
+// The purpose of this class is to do more sophisticated number
+// comparison. (Due to round-off error, etc, it's very unlikely that
+// two floating-points will be equal exactly. Hence a naive
+// comparison by the == operation often doesn't work.)
+//
+// Format of IEEE floating-point:
+//
+// The most-significant bit being the leftmost, an IEEE
+// floating-point looks like
+//
+// sign_bit exponent_bits fraction_bits
+//
+// Here, sign_bit is a single bit that designates the sign of the
+// number.
+//
+// For float, there are 8 exponent bits and 23 fraction bits.
+//
+// For double, there are 11 exponent bits and 52 fraction bits.
+//
+// More details can be found at
+// http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+template <typename RawType>
+class FloatingPoint {
+ public:
+ // Defines the unsigned integer type that has the same size as the
+ // floating point number.
+ typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
+
+ // Constants.
+
+ // # of bits in a number.
+ static const size_t kBitCount = 8*sizeof(RawType);
+
+ // # of fraction bits in a number.
+ static const size_t kFractionBitCount =
+ std::numeric_limits<RawType>::digits - 1;
+
+ // # of exponent bits in a number.
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ // The mask for the sign bit.
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ // The mask for the fraction bits.
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ // The mask for the exponent bits.
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+ // How many ULP's (Units in the Last Place) we want to tolerate when
+ // comparing two numbers. The larger the value, the more error we
+ // allow. A 0 value means that two numbers must be exactly the same
+ // to be considered equal.
+ //
+ // The maximum error of a single floating-point operation is 0.5
+ // units in the last place. On Intel CPU's, all floating-point
+ // calculations are done with 80-bit precision, while double has 64
+ // bits. Therefore, 4 should be enough for ordinary use.
+ //
+ // See the following article for more details on ULP:
+ // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+ static const size_t kMaxUlps = 4;
+
+ // Constructs a FloatingPoint from a raw floating-point number.
+ //
+ // On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ // around may change its bits, although the new value is guaranteed
+ // to be also a NAN. Therefore, don't expect this constructor to
+ // preserve the bits in x when x is a NAN.
+ explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
+
+ // Static methods
+
+ // Reinterprets a bit pattern as a floating-point number.
+ //
+ // This function is needed to test the AlmostEquals() method.
+ static RawType ReinterpretBits(const Bits bits) {
+ FloatingPoint fp(0);
+ fp.u_.bits_ = bits;
+ return fp.u_.value_;
+ }
+
+ // Returns the floating-point number that represent positive infinity.
+ // (All exponent bits set, all fraction bits zero.)
+ static RawType Infinity() {
+ return ReinterpretBits(kExponentBitMask);
+ }
+
+ // Returns the maximum representable finite floating-point number.
+ // (Defined out-of-line; specialized for float and double below.)
+ static RawType Max();
+
+ // Non-static methods
+
+ // Returns the bits that represents this number.
+ const Bits &bits() const { return u_.bits_; }
+
+ // Returns the exponent bits of this number.
+ Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
+
+ // Returns the fraction bits of this number.
+ Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
+
+ // Returns the sign bit of this number.
+ Bits sign_bit() const { return kSignBitMask & u_.bits_; }
+
+ // Returns true iff this is NAN (not a number).
+ bool is_nan() const {
+ // It's a NAN if the exponent bits are all ones and the fraction
+ // bits are not entirely zeros.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+ // Returns true iff this number is at most kMaxUlps ULP's away from
+ // rhs. In particular, this function:
+ //
+ // - returns false if either number is (or both are) NAN.
+ // - treats really large numbers as almost equal to infinity.
+ // - thinks +0.0 and -0.0 are 0 ULP's apart.
+ bool AlmostEquals(const FloatingPoint& rhs) const {
+ // The IEEE standard says that any comparison operation involving
+ // a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+ <= kMaxUlps;
+ }
+
+ private:
+ // The data type used to store the actual floating-point number.
+ // NOTE(review): reading bits_ after writing value_ (and vice versa)
+ // is type punning through a union; major compilers support it but
+ // strict C++ does not guarantee it — confirm for target toolchains.
+ union FloatingPointUnion {
+ RawType value_; // The raw floating-point number.
+ Bits bits_; // The bits that represent the number.
+ };
+
+ // Converts an integer from the sign-and-magnitude representation to
+ // the biased representation. More precisely, let N be 2 to the
+ // power of (kBitCount - 1), an integer x is represented by the
+ // unsigned number x + N.
+ //
+ // For instance,
+ //
+ // -N + 1 (the most negative number representable using
+ // sign-and-magnitude) is represented by 1;
+ // 0 is represented by N; and
+ // N - 1 (the biggest number representable using
+ // sign-and-magnitude) is represented by 2N - 1.
+ //
+ // Read http://en.wikipedia.org/wiki/Signed_number_representations
+ // for more details on signed number representations.
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
+
+ // Given two numbers in the sign-and-magnitude representation,
+ // returns the distance between them as an unsigned number.
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
+
+ FloatingPointUnion u_;
+};
+
+// We cannot use std::numeric_limits<T>::max() as it clashes with the max()
+// macro defined by <windows.h>.
+template <>
+inline float FloatingPoint<float>::Max() { return FLT_MAX; }
+template <>
+inline double FloatingPoint<double>::Max() { return DBL_MAX; }
+
+// Typedefs the instances of the FloatingPoint template class that we
+// care to use.
+typedef FloatingPoint<float> Float;
+typedef FloatingPoint<double> Double;
+
+// In order to catch the mistake of putting tests that use different
+// test fixture classes in the same test case, we need to assign
+// unique IDs to fixture classes and compare them. The TypeId type is
+// used to hold such IDs. The user should treat TypeId as an opaque
+// type: the only operation allowed on TypeId values is to compare
+// them for equality using the == operator.
+typedef const void* TypeId;
+
+template <typename T>
+class TypeIdHelper {
+ public:
+ // dummy_ must not have a const type. Otherwise an overly eager
+ // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
+ // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
+ static bool dummy_;
+};
+
+// One static bool is allocated per instantiation of TypeIdHelper<T>;
+// its address (not its value) serves as the type's ID.
+template <typename T>
+bool TypeIdHelper<T>::dummy_ = false;
+
+// GetTypeId<T>() returns the ID of type T. Different values will be
+// returned for different types. Calling the function twice with the
+// same type argument is guaranteed to return the same ID.
+template <typename T>
+TypeId GetTypeId() {
+ // The compiler is required to allocate a different
+ // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
+ // the template. Therefore, the address of dummy_ is guaranteed to
+ // be unique.
+ return &(TypeIdHelper<T>::dummy_);
+}
+
+// Returns the type ID of ::testing::Test. Always call this instead
+// of GetTypeId< ::testing::Test>() to get the type ID of
+// ::testing::Test, as the latter may give the wrong result due to a
+// suspected linker bug when compiling Google Test as a Mac OS X
+// framework.
+GTEST_API_ TypeId GetTestTypeId();
+
+// Defines the abstract factory interface that creates instances
+// of a Test object.
+class TestFactoryBase {
+ public:
+ virtual ~TestFactoryBase() {}
+
+ // Creates a test instance to run. The instance is both created and destroyed
+ // within TestInfoImpl::Run()
+ virtual Test* CreateTest() = 0;
+
+ protected:
+ // Protected default c'tor: only derived factories may be constructed.
+ TestFactoryBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides implementation of TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+ virtual Test* CreateTest() { return new TestClass; }
+};
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+ long hr); // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+ long hr); // NOLINT
+
+#endif // GTEST_OS_WINDOWS
+
+// Types of SetUpTestCase() and TearDownTestCase() functions.
+typedef void (*SetUpTestCaseFunc)();
+typedef void (*TearDownTestCaseFunc)();
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// name: name of the test
+// type_param the name of the test's type parameter, or NULL if
+// this is not a typed or a type-parameterized test.
+// value_param text representation of the test's value parameter,
+// or NULL if this is not a type-parameterized test.
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+ const char* test_case_name,
+ const char* name,
+ const char* type_param,
+ const char* value_param,
+ TypeId fixture_class_id,
+ SetUpTestCaseFunc set_up_tc,
+ TearDownTestCaseFunc tear_down_tc,
+ TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// State of the definition of a type-parameterized test case.
+class GTEST_API_ TypedTestCasePState {
+ public:
+ TypedTestCasePState() : registered_(false) {}
+
+ // Adds the given test name to defined_test_names_ and return true
+ // if the test case hasn't been registered; otherwise aborts the
+ // program.
+ bool AddTestName(const char* file, int line, const char* case_name,
+ const char* test_name) {
+ if (registered_) {
+ fprintf(stderr, "%s Test %s must be defined before "
+ "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
+ FormatFileLocation(file, line).c_str(), test_name, case_name);
+ fflush(stderr);
+ posix::Abort();
+ }
+ defined_test_names_.insert(test_name);
+ return true;
+ }
+
+ // Verifies that registered_tests match the test names in
+ // defined_test_names_; returns registered_tests if successful, or
+ // aborts the program otherwise.
+ const char* VerifyRegisteredTestNames(
+ const char* file, int line, const char* registered_tests);
+
+ private:
+ bool registered_;
+ // Keyed on the const char* pointer value itself (default set
+ // ordering), not on string content.
+ ::std::set<const char*> defined_test_names_;
+};
+
+// Skips to the first non-space char after the first comma in 'str';
+// returns NULL if no comma is found in 'str'. The returned pointer
+// aliases 'str' — it is not a copy.
+inline const char* SkipComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ if (comma == NULL) {
+ return NULL;
+ }
+ while (IsSpace(*(++comma))) {}
+ return comma;
+}
+
+// Returns the prefix of 'str' before the first comma in it; returns
+// the entire string if it contains no comma. Unlike SkipComma(),
+// this returns an owned std::string copy.
+inline std::string GetPrefixUntilComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ return comma == NULL ? str : std::string(str, comma);
+}
+
+// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
+// registers a list of type-parameterized tests with Google Test. The
+// return value is insignificant - we just need to return something
+// such that we can call this function in a namespace scope.
+//
+// Implementation note: The GTEST_TEMPLATE_ macro declares a template
+// template parameter. It's defined in gtest-type-util.h.
+template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
+class TypeParameterizedTest {
+ public:
+ // 'index' is the index of the test in the type list 'Types'
+ // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
+ // Types). Valid values for 'index' are [0, N - 1] where N is the
+ // length of Types.
+ static bool Register(const char* prefix, const char* case_name,
+ const char* test_names, int index) {
+ typedef typename Types::Head Type;
+ typedef Fixture<Type> FixtureClass;
+ typedef typename GTEST_BIND_(TestSel, Type) TestClass;
+
+ // First, registers the first type-parameterized test in the type
+ // list. The full test case name has the form
+ // "prefix/case_name/index" (the "prefix/" part is omitted when
+ // prefix is empty).
+ MakeAndRegisterTestInfo(
+ (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/"
+ + StreamableToString(index)).c_str(),
+ GetPrefixUntilComma(test_names).c_str(),
+ GetTypeName<Type>().c_str(),
+ NULL, // No value parameter.
+ GetTypeId<FixtureClass>(),
+ TestClass::SetUpTestCase,
+ TestClass::TearDownTestCase,
+ new TestFactoryImpl<TestClass>);
+
+ // Next, recurses (at compile time) with the tail of the type list.
+ return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
+ ::Register(prefix, case_name, test_names, index + 1);
+ }
+};
+
+// The base case for the compile time recursion: an empty type list
+// (Types0) means there is nothing left to register.
+template <GTEST_TEMPLATE_ Fixture, class TestSel>
+class TypeParameterizedTest<Fixture, TestSel, Types0> {
+ public:
+ static bool Register(const char* /*prefix*/, const char* /*case_name*/,
+ const char* /*test_names*/, int /*index*/) {
+ return true;
+ }
+};
+
+// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
+// registers *all combinations* of 'Tests' and 'Types' with Google
+// Test. The return value is insignificant - we just need to return
+// something such that we can call this function in a namespace scope.
+template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
+class TypeParameterizedTestCase {
+ public:
+ static bool Register(const char* prefix, const char* case_name,
+ const char* test_names) {
+ typedef typename Tests::Head Head;
+
+ // First, register the first test in 'Test' for each type in 'Types'.
+ TypeParameterizedTest<Fixture, Head, Types>::Register(
+ prefix, case_name, test_names, 0);
+
+ // Next, recurses (at compile time) with the tail of the test list;
+ // SkipComma() advances the comma-separated name list in step with
+ // the test list.
+ return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
+ ::Register(prefix, case_name, SkipComma(test_names));
+ }
+};
+
+// The base case for the compile time recursion: an empty test list
+// (Templates0) means there is nothing left to register.
+template <GTEST_TEMPLATE_ Fixture, typename Types>
+class TypeParameterizedTestCase<Fixture, Templates0, Types> {
+ public:
+ static bool Register(const char* /*prefix*/, const char* /*case_name*/,
+ const char* /*test_names*/) {
+ return true;
+ }
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(
+ UnitTest* unit_test, int skip_count);
+
+// Helpers for suppressing warnings on unreachable code or constant
+// condition.
+
+// Always returns true.
+GTEST_API_ bool AlwaysTrue();
+
+// Always returns false. Implemented via AlwaysTrue() so the compiler
+// cannot see the result as a compile-time constant.
+inline bool AlwaysFalse() { return !AlwaysTrue(); }
+
+// Helper for suppressing false warning from Clang on a const char*
+// variable declared in a conditional expression always being NULL in
+// the else branch.
+struct GTEST_API_ ConstCharPtr {
+ // Intentionally non-explicit: allows a string literal to initialize
+ // a ConstCharPtr declared inside an if-condition.
+ ConstCharPtr(const char* str) : value(str) {}
+ // Always truthy, so the if-branch holding the declaration is taken.
+ operator bool() const { return true; }
+ const char* value;
+};
+
+// A simple Linear Congruential Generator for generating random
+// numbers with a uniform distribution. Unlike rand() and srand(), it
+// doesn't use global state (and therefore can't interfere with user
+// code). Unlike rand_r(), it's portable. An LCG isn't very random,
+// but it's good enough for our purposes.
+class GTEST_API_ Random {
+ public:
+ // Exclusive upper bound on generated values: 2^31.
+ static const UInt32 kMaxRange = 1u << 31;
+
+ explicit Random(UInt32 seed) : state_(seed) {}
+
+ void Reseed(UInt32 seed) { state_ = seed; }
+
+ // Generates a random number from [0, range). Crashes if 'range' is
+ // 0 or greater than kMaxRange. (Defined in the .cc file.)
+ UInt32 Generate(UInt32 range);
+
+ private:
+ UInt32 state_; // Entire generator state: one 32-bit word.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
+};
+
+// Defining a variable of type CompileAssertTypesEqual<T1, T2> will cause a
+// compiler error iff T1 and T2 are different types. (Only the T == T
+// specialization below is ever defined.)
+template <typename T1, typename T2>
+struct CompileAssertTypesEqual;
+
+template <typename T>
+struct CompileAssertTypesEqual<T, T> {
+};
+
+// Removes the reference from a type if it is a reference type,
+// otherwise leaves it unchanged. This is the same as
+// tr1::remove_reference, which is not widely available yet.
+template <typename T>
+struct RemoveReference { typedef T type; }; // NOLINT
+template <typename T>
+struct RemoveReference<T&> { typedef T type; }; // NOLINT
+
+// A handy wrapper around RemoveReference that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_REFERENCE_(T) \
+ typename ::testing::internal::RemoveReference<T>::type
+
+// Removes const from a type if it is a const type, otherwise leaves
+// it unchanged. This is the same as tr1::remove_const, which is not
+// widely available yet.
+template <typename T>
+struct RemoveConst { typedef T type; }; // NOLINT
+template <typename T>
+struct RemoveConst<const T> { typedef T type; }; // NOLINT
+
+// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above
+// definition to fail to remove the const in 'const int[3]' and 'const
+// char[3][4]'. The following specialization works around the bug by
+// recursing on the array's element type.
+template <typename T, size_t N>
+struct RemoveConst<const T[N]> {
+ typedef typename RemoveConst<T>::type type[N];
+};
+
+#if defined(_MSC_VER) && _MSC_VER < 1400
+// This is the only specialization that allows VC++ 7.1 to remove const in
+// 'const int[3] and 'const int[3][4]'. However, it causes trouble with GCC
+// and thus needs to be conditionally compiled.
+template <typename T, size_t N>
+struct RemoveConst<T[N]> {
+ typedef typename RemoveConst<T>::type type[N];
+};
+#endif
+
+// A handy wrapper around RemoveConst that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_CONST_(T) \
+ typename ::testing::internal::RemoveConst<T>::type
+
+// Turns const U&, U&, const U, and U all into U. (Reference is
+// stripped first, then the const that stripping may have exposed.)
+#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \
+ GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T))
+
+// Adds reference to a type if it is not a reference type,
+// otherwise leaves it unchanged. This is the same as
+// tr1::add_reference, which is not widely available yet.
+template <typename T>
+struct AddReference { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddReference<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper around AddReference that works when the argument T
+// depends on template parameters.
+#define GTEST_ADD_REFERENCE_(T) \
+ typename ::testing::internal::AddReference<T>::type
+
+// Adds a reference to const on top of T as necessary. For example,
+// it transforms
+//
+// char ==> const char&
+// const char ==> const char&
+// char& ==> const char&
+// const char& ==> const char&
+//
+// The argument T must depend on some template parameters.
+#define GTEST_REFERENCE_TO_CONST_(T) \
+ GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T))
+
+// ImplicitlyConvertible<From, To>::value is a compile-time bool
+// constant that's true iff type From can be implicitly converted to
+// type To.
+template <typename From, typename To>
+class ImplicitlyConvertible {
+ private:
+ // We need the following helper functions only for their types.
+ // They have no implementations.
+
+ // MakeFrom() is an expression whose type is From. We cannot simply
+ // use From(), as the type From may not have a public default
+ // constructor.
+ static From MakeFrom();
+
+ // These two functions are overloaded. Given an expression
+ // Helper(x), the compiler will pick the first version if x can be
+ // implicitly converted to type To; otherwise it will pick the
+ // second version.
+ //
+ // The first version returns a value of size 1, and the second
+ // version returns a value of size 2. Therefore, by checking the
+ // size of Helper(x), which can be done at compile time, we can tell
+ // which version of Helper() is used, and hence whether x can be
+ // implicitly converted to type To.
+ static char Helper(To);
+ static char (&Helper(...))[2]; // NOLINT
+
+ // We have to put the 'public' section after the 'private' section,
+ // or MSVC refuses to compile the code.
+ public:
+ // MSVC warns about implicitly converting from double to int for
+ // possible loss of data, so we need to temporarily disable the
+ // warning.
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4244) // Temporarily disables warning 4244.
+
+ static const bool value =
+ sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
+# pragma warning(pop) // Restores the warning state.
+#elif defined(__BORLANDC__)
+ // C++Builder cannot use member overload resolution during template
+ // instantiation. The simplest workaround is to use its C++0x type traits
+ // functions (C++Builder 2009 and above only).
+ static const bool value = __is_convertible(From, To);
+#else
+ static const bool value =
+ sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
+#endif // _MSC_VER
+};
+template <typename From, typename To>
+const bool ImplicitlyConvertible<From, To>::value;
+
+// IsAProtocolMessage<T>::value is a compile-time bool constant that's
+// true iff T is type ProtocolMessage, proto2::Message, or a subclass
+// of those. (Both message types are only forward-declared earlier in
+// this header; the convertibility test works on pointers to the
+// incomplete types.)
+template <typename T>
+struct IsAProtocolMessage
+ : public bool_constant<
+ ImplicitlyConvertible<const T*, const ::ProtocolMessage*>::value ||
+ ImplicitlyConvertible<const T*, const ::proto2::Message*>::value> {
+};
+
+// When the compiler sees expression IsContainerTest<C>(0), if C is an
+// STL-style container class, the first overload of IsContainerTest
+// will be viable (since both C::iterator* and C::const_iterator* are
+// valid types and NULL can be implicitly converted to them). It will
+// be picked over the second overload as 'int' is a perfect match for
+// the type of argument 0. If C::iterator or C::const_iterator is not
+// a valid type, the first overload is not viable, and the second
+// overload will be picked. Therefore, we can determine whether C is
+// a container class by checking the type of IsContainerTest<C>(0).
+// The value of the expression is insignificant.
+//
+// Note that we look for both C::iterator and C::const_iterator. The
+// reason is that C++ injects the name of a class as a member of the
+// class itself (e.g. you can refer to class iterator as either
+// 'iterator' or 'iterator::iterator'). If we look for C::iterator
+// only, for example, we would mistakenly think that a class named
+// iterator is an STL container.
+//
+// Also note that the simpler approach of overloading
+// IsContainerTest(typename C::const_iterator*) and
+// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++.
+typedef int IsContainer;
+template <class C>
+IsContainer IsContainerTest(int /* dummy */,
+ typename C::iterator* /* it */ = NULL,
+ typename C::const_iterator* /* const_it */ = NULL) {
+ return 0;
+}
+
+typedef char IsNotContainer;
+template <class C>
+IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; }
+
+// EnableIf<condition>::type is void when 'Cond' is true, and
+// undefined when 'Cond' is false. To use SFINAE to make a function
+// overload only apply when a particular expression is true, add
+// "typename EnableIf<expression>::type* = 0" as the last parameter.
+template<bool> struct EnableIf;
+template<> struct EnableIf<true> { typedef void type; }; // NOLINT
+
+// Utilities for native arrays.
+
+// ArrayEq() compares two k-dimensional native arrays using the
+// elements' operator==, where k can be any integer >= 0. When k is
+// 0, ArrayEq() degenerates into comparing a single pair of values.
+
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs);
+
+// This generic version is used when k is 0 (the scalar base case of
+// the recursion).
+template <typename T, typename U>
+inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; }
+
+// This overload is used when k >= 1.
+// NOTE(review): the calls below are explicitly qualified with
+// internal:: — presumably to pin overload resolution to these
+// helpers rather than ADL candidates; confirm before changing.
+template <typename T, typename U, size_t N>
+inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) {
+ return internal::ArrayEq(lhs, N, rhs);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous ArrayEq() function, arrays with different sizes would
+// lead to different copies of the template code.
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs) {
+ for (size_t i = 0; i != size; i++) {
+ if (!internal::ArrayEq(lhs[i], rhs[i]))
+ return false;
+ }
+ return true;
+}
+
+// Finds the first element in the iterator range [begin, end) that
+// equals elem. Element may be a native array type itself. Returns
+// 'end' when no match is found.
+template <typename Iter, typename Element>
+Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) {
+ for (Iter it = begin; it != end; ++it) {
+ if (internal::ArrayEq(*it, elem))
+ return it;
+ }
+ return end;
+}
+
+// CopyArray() copies a k-dimensional native array using the elements'
+// operator=, where k can be any integer >= 0. When k is 0,
+// CopyArray() degenerates into copying a single value.
+
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to);
+
+// This generic version is used when k is 0 (the scalar base case of
+// the recursion).
+template <typename T, typename U>
+inline void CopyArray(const T& from, U* to) { *to = from; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline void CopyArray(const T(&from)[N], U(*to)[N]) {
+ internal::CopyArray(from, N, *to);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous CopyArray() function, arrays with different sizes
+// would lead to different copies of the template code.
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to) {
+ for (size_t i = 0; i != size; i++) {
+ internal::CopyArray(from[i], to + i);
+ }
+}
+
+// The relation between a NativeArray object (see below) and the
+// native array it represents.
+enum RelationToSource {
+ kReference, // The NativeArray references the native array.
+ kCopy // The NativeArray makes a copy of the native array and
+ // owns the copy.
+};
+
+// Adapts a native array to a read-only STL-style container. Instead
+// of the complete STL container concept, this adaptor only implements
+// members useful for Google Mock's container matchers. New members
+// should be added as needed. To simplify the implementation, we only
+// support Element being a raw type (i.e. having no top-level const or
+// reference modifier). It's the client's responsibility to satisfy
+// this requirement. Element can be an array type itself (hence
+// multi-dimensional arrays are supported).
+template <typename Element>
+class NativeArray {
+ public:
+ // STL-style container typedefs.
+ typedef Element value_type;
+ typedef Element* iterator;
+ typedef const Element* const_iterator;
+
+ // Constructs from a native array. With kReference the caller must
+ // keep `array` alive for the lifetime of this object; with kCopy a
+ // private copy is made and owned by this object.
+ NativeArray(const Element* array, size_t count, RelationToSource relation) {
+ Init(array, count, relation);
+ }
+
+ // Copy constructor. Inherits the source's relation: a kCopy source
+ // yields a fresh owned copy; a kReference source yields another
+ // reference to the same native array.
+ NativeArray(const NativeArray& rhs) {
+ Init(rhs.array_, rhs.size_, rhs.relation_to_source_);
+ }
+
+ ~NativeArray() {
+ // Ensures that the user doesn't instantiate NativeArray with a
+ // const or reference type.
+ static_cast<void>(StaticAssertTypeEqHelper<Element,
+ GTEST_REMOVE_REFERENCE_AND_CONST_(Element)>());
+ if (relation_to_source_ == kCopy)
+ delete[] array_;
+ }
+
+ // STL-style container methods.
+ size_t size() const { return size_; }
+ const_iterator begin() const { return array_; }
+ const_iterator end() const { return array_ + size_; }
+ // Element-wise equality via ArrayEq (handles nested array elements).
+ bool operator==(const NativeArray& rhs) const {
+ return size() == rhs.size() &&
+ ArrayEq(begin(), size(), rhs.begin());
+ }
+
+ private:
+ // Initializes this object; makes a copy of the input array if
+ // 'relation' is kCopy.
+ void Init(const Element* array, size_t a_size, RelationToSource relation) {
+ if (relation == kReference) {
+ array_ = array;
+ } else {
+ Element* const copy = new Element[a_size];
+ CopyArray(array, a_size, copy);
+ array_ = copy;
+ }
+ size_ = a_size;
+ relation_to_source_ = relation;
+ }
+
+ // Owned (and freed by the destructor) iff relation_to_source_ == kCopy.
+ const Element* array_;
+ size_t size_;
+ RelationToSource relation_to_source_;
+
+ GTEST_DISALLOW_ASSIGN_(NativeArray);
+};
+
+} // namespace internal
+} // namespace testing
+
+// Records a test-part result at the given location. The assignment of a
+// Message to the temporary AssertHelper is what allows callers to stream
+// additional text with operator<<.
+#define GTEST_MESSAGE_AT_(file, line, message, result_type) \
+ ::testing::internal::AssertHelper(result_type, file, line, message) \
+ = ::testing::Message()
+
+#define GTEST_MESSAGE_(message, result_type) \
+ GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type)
+
+// Note the `return`: a fatal failure aborts the current function.
+#define GTEST_FATAL_FAILURE_(message) \
+ return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
+
+#define GTEST_NONFATAL_FAILURE_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
+
+#define GTEST_SUCCESS_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
+
+// Suppresses MSVC warning 4702 (unreachable code) for the code following
+// statement if it returns or throws (or doesn't return or throw in some
+// situations).
+#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
+ if (::testing::internal::AlwaysTrue()) { statement; }
+
+// Fails unless `statement` throws exactly `expected_exception`. On failure
+// the message is stored in gtest_msg and control jumps (goto) to the label
+// hidden in the dead `else` branch, where fail() reports it; callers may
+// then stream extra text. __LINE__ makes the label unique per expansion.
+#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::ConstCharPtr gtest_msg = "") { \
+ bool gtest_caught_expected = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (expected_exception const&) { \
+ gtest_caught_expected = true; \
+ } \
+ catch (...) { \
+ gtest_msg.value = \
+ "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws a different type."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ if (!gtest_caught_expected) { \
+ gtest_msg.value = \
+ "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws nothing."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
+ fail(gtest_msg.value)
+
+// Fails if `statement` throws anything. Uses the same goto-to-dead-else
+// trick as GTEST_TEST_THROW_ so fail() can accept streamed messages.
+#define GTEST_TEST_NO_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
+ fail("Expected: " #statement " doesn't throw an exception.\n" \
+ " Actual: it throws.")
+
+// Fails unless `statement` throws something (any type). Same label/goto
+// structure as the other *_THROW_ macros above.
+#define GTEST_TEST_ANY_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ bool gtest_caught_any = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ gtest_caught_any = true; \
+ } \
+ if (!gtest_caught_any) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
+ fail("Expected: " #statement " throws an exception.\n" \
+ " Actual: it doesn't.")
+
+
+// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
+// either a boolean expression or an AssertionResult. text is a textual
+// representation of expression as it was passed into the EXPECT_TRUE.
+// The empty statement after `if` makes the failure path the `else` branch.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar_ = \
+ ::testing::AssertionResult(expression)) \
+ ; \
+ else \
+ fail(::testing::internal::GetBoolAssertionFailureMessage(\
+ gtest_ar_, text, #actual, #expected).c_str())
+
+// Fails if executing `statement` produces a new fatal failure in the
+// current thread, as observed by HasNewFatalFailureHelper.
+#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
+ fail("Expected: " #statement " doesn't generate new fatal " \
+ "failures in the current thread.\n" \
+ " Actual: it does.")
+
+// Expands to the name of the class that implements the given test.
+#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ test_case_name##_##test_name##_Test
+
+// Helper macro for defining tests. Expands to (1) a test class deriving
+// from parent_class, (2) a static test_info_ member whose initializer
+// registers the test with MakeAndRegisterTestInfo at program startup, and
+// (3) the header of TestBody() — the test's own braces follow the macro.
+#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
+class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
+ public:\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
+ private:\
+ virtual void TestBody();\
+ static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
+};\
+\
+::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
+ ::test_info_ =\
+ ::testing::internal::MakeAndRegisterTestInfo(\
+ #test_case_name, #test_name, NULL, NULL, \
+ (parent_id), \
+ parent_class::SetUpTestCase, \
+ parent_class::TearDownTestCase, \
+ new ::testing::internal::TestFactoryImpl<\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
+void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for death tests. It is
+// #included by gtest.h so a user doesn't need to include this
+// directly.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines internal utilities needed for implementing
+// death tests. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+
+#include <stdio.h>
+
+namespace testing {
+namespace internal {
+
+// String flag declaration (the flag itself is defined elsewhere in
+// Google Test).
+GTEST_DECLARE_string_(internal_run_death_test);
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
+const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
+
+#if GTEST_HAS_DEATH_TEST
+
+// DeathTest is a class that hides much of the complexity of the
+// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
+// returns a concrete class that depends on the prevailing death test
+// style, as defined by the --gtest_death_test_style and/or
+// --gtest_internal_run_death_test flags.
+
+// In describing the results of death tests, these terms are used with
+// the corresponding definitions:
+//
+// exit status: The integer exit information in the format specified
+// by wait(2)
+// exit code: The integer code passed to exit(3), _exit(2), or
+// returned from main()
+class GTEST_API_ DeathTest {
+ public:
+ // Create returns false if there was an error determining the
+ // appropriate action to take for the current death test; for example,
+ // if the gtest_death_test_style flag is set to an invalid value.
+ // The LastMessage method will return a more detailed message in that
+ // case. Otherwise, the DeathTest pointer pointed to by the "test"
+ // argument is set. If the death test should be skipped, the pointer
+ // is set to NULL; otherwise, it is set to the address of a new concrete
+ // DeathTest object that controls the execution of the current test.
+ static bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+ DeathTest();
+ virtual ~DeathTest() { }
+
+ // A helper class that aborts a death test when it's deleted. Placing
+ // one on the stack lets a `return` from the test statement be detected
+ // (the destructor runs and aborts with TEST_ENCOUNTERED_RETURN_STATEMENT).
+ class ReturnSentinel {
+ public:
+ explicit ReturnSentinel(DeathTest* test) : test_(test) { }
+ ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
+ private:
+ DeathTest* const test_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
+ } GTEST_ATTRIBUTE_UNUSED_;
+
+ // An enumeration of possible roles that may be taken when a death
+ // test is encountered. EXECUTE means that the death test logic should
+ // be executed immediately. OVERSEE means that the program should prepare
+ // the appropriate environment for a child process to execute the death
+ // test, then wait for it to complete.
+ enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
+
+ // An enumeration of the three reasons that a test might be aborted.
+ enum AbortReason {
+ TEST_ENCOUNTERED_RETURN_STATEMENT,
+ TEST_THREW_EXCEPTION,
+ TEST_DID_NOT_DIE
+ };
+
+ // Assumes one of the above roles.
+ virtual TestRole AssumeRole() = 0;
+
+ // Waits for the death test to finish and returns its status.
+ virtual int Wait() = 0;
+
+ // Returns true if the death test passed; that is, the test process
+ // exited during the test, its exit status matches a user-supplied
+ // predicate, and its stderr output matches a user-supplied regular
+ // expression.
+ // The user-supplied predicate may be a macro expression rather
+ // than a function pointer or functor, or else Wait and Passed could
+ // be combined.
+ virtual bool Passed(bool exit_status_ok) = 0;
+
+ // Signals that the death test did not die as expected.
+ virtual void Abort(AbortReason reason) = 0;
+
+ // Returns a human-readable message describing the outcome of the
+ // last death test.
+ static const char* LastMessage();
+
+ static void set_last_death_test_message(const std::string& message);
+
+ private:
+ // A string containing a description of the outcome of the last death test.
+ static std::string last_death_test_message_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
+};
+
+// Factory interface for death tests. May be mocked out for testing.
+// Create has the same signature as DeathTest::Create; see its comment
+// for the contract.
+class DeathTestFactory {
+ public:
+ virtual ~DeathTestFactory() { }
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// Traps C++ exceptions escaping statement and reports them as test
+// failures. Note that trapping SEH exceptions is not implemented here.
+// Both catch clauses abort the death test with TEST_THREW_EXCEPTION;
+// the std::exception clause additionally prints the exception message
+// to stderr first.
+# if GTEST_HAS_EXCEPTIONS
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } catch (const ::std::exception& gtest_exception) { \
+ fprintf(\
+ stderr, \
+ "\n%s: Caught std::exception-derived exception escaping the " \
+ "death test statement. Exception message: %s\n", \
+ ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
+ gtest_exception.what()); \
+ fflush(stderr); \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ } catch (...) { \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ }
+
+// Without exception support, just run the statement.
+# else
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+
+# endif
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*. Depending on the role returned by
+// AssumeRole(), it either oversees a child process running the death
+// test (OVERSEE_TEST) or executes the statement itself and aborts if it
+// fails to die (EXECUTE_TEST). Any failure jumps to the label in the
+// dead `else` branch, where fail() reports DeathTest::LastMessage().
+# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ const ::testing::internal::RE& gtest_regex = (regex); \
+ ::testing::internal::DeathTest* gtest_dt; \
+ if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
+ __FILE__, __LINE__, &gtest_dt)) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ if (gtest_dt != NULL) { \
+ ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
+ gtest_dt_ptr(gtest_dt); \
+ switch (gtest_dt->AssumeRole()) { \
+ case ::testing::internal::DeathTest::OVERSEE_TEST: \
+ if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ break; \
+ case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+ ::testing::internal::DeathTest::ReturnSentinel \
+ gtest_sentinel(gtest_dt); \
+ GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
+ gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+ break; \
+ } \
+ default: \
+ break; \
+ } \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
+ fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
+
+// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in
+// NDEBUG mode. In this case we need the statements to be executed, the regex is
+// ignored, and the macro must accept a streamed message even though the message
+// is never printed.
+# define GTEST_EXECUTE_STATEMENT_(statement, regex) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } else \
+ ::testing::Message()
+
+// A class representing the parsed contents of the
+// --gtest_internal_run_death_test flag, as it existed when
+// RUN_ALL_TESTS was called.
+class InternalRunDeathTestFlag {
+ public:
+ InternalRunDeathTestFlag(const std::string& a_file,
+ int a_line,
+ int an_index,
+ int a_write_fd)
+ : file_(a_file), line_(a_line), index_(an_index),
+ write_fd_(a_write_fd) {}
+
+ // Closes write_fd_ if it was a valid descriptor (>= 0).
+ ~InternalRunDeathTestFlag() {
+ if (write_fd_ >= 0)
+ posix::Close(write_fd_);
+ }
+
+ const std::string& file() const { return file_; }
+ int line() const { return line_; }
+ int index() const { return index_; }
+ int write_fd() const { return write_fd_; }
+
+ private:
+ std::string file_;
+ int line_;
+ int index_;
+ // File descriptor owned by this object; closed in the destructor.
+ int write_fd_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
+};
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
+
+#else // GTEST_HAS_DEATH_TEST
+
+// This macro is used for implementing macros such as
+// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
+// death tests are not supported. Those macros must compile on such systems
+// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
+// systems that support death tests. This allows one to write such a macro
+// on a system that does not support death tests and be sure that it will
+// compile on a death-test supporting system.
+//
+// Parameters:
+// statement - A statement that a macro such as EXPECT_DEATH would test
+// for program termination. This macro has to make sure this
+// statement is compiled but not executed, to ensure that
+// EXPECT_DEATH_IF_SUPPORTED compiles with a certain
+// parameter iff EXPECT_DEATH compiles with it.
+// regex - A regex that a macro such as EXPECT_DEATH would use to test
+// the output of statement. This parameter has to be
+// compiled but not evaluated by this macro, to ensure that
+// this macro only accepts expressions that a macro such as
+// EXPECT_DEATH would accept.
+// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
+// and a return statement for ASSERT_DEATH_IF_SUPPORTED.
+// This ensures that ASSERT_DEATH_IF_SUPPORTED will not
+// compile inside functions where ASSERT_DEATH doesn't
+// compile.
+//
+// The branch that has an always false condition is used to ensure that
+// statement and regex are compiled (and thus syntactically correct) but
+// never executed. The unreachable code macro protects the terminator
+// statement from generating an 'unreachable code' warning in case
+// statement unconditionally returns or throws. The Message constructor at
+// the end allows the syntax of streaming additional messages into the
+// macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
+# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_LOG_(WARNING) \
+ << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified."; \
+ } else if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::RE::PartialMatch(".*", (regex)); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ terminator; \
+ } else \
+ ::testing::Message()
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+namespace testing {
+
+// This flag controls the style of death tests. Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process. Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests. IMPORTANT: This is an internal utility. Using it may break the
+// implementation of death tests. User code MUST NOT use it.
+GTEST_API_ bool InDeathTestChild();
+
+} // namespace internal
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+// 1. It generates a warning if there is more than one active
+// thread. This is because it's safe to fork() or clone() only
+// when there is a single thread.
+//
+// 2. The parent process clone()s a sub-process and runs the death
+// test in it; the sub-process exits with code 0 at the end of the
+// death test, if it hasn't exited already.
+//
+// 3. The parent process waits for the sub-process to terminate.
+//
+// 4. The parent process checks the exit code and error message of
+// the sub-process.
+//
+// Examples:
+//
+// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+// for (int i = 0; i < 5; i++) {
+// EXPECT_DEATH(server.ProcessRequest(i),
+// "Invalid request .* in ProcessRequest()")
+// << "Failed to die on request " << i;
+// }
+//
+// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+// bool KilledBySIGHUP(int exit_code) {
+// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+// }
+//
+// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// On the regular expressions used in death tests:
+//
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests; though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from. In the following: 'A' denotes a
+// literal character, period (.), or a single \\ escape sequence;
+// 'x' and 'y' denote regular expressions; 'm' and 'n' are for
+// natural numbers.
+//
+// c matches any literal character c
+// \\d matches any decimal digit
+// \\D matches any character that's not a decimal digit
+// \\f matches \f
+// \\n matches \n
+// \\r matches \r
+// \\s matches any ASCII whitespace, including \n
+// \\S matches any character that's not a whitespace
+// \\t matches \t
+// \\v matches \v
+// \\w matches any letter, _, or decimal digit
+// \\W matches any character that \\w doesn't match
+// \\c matches any literal character c, which must be a punctuation
+// . matches any single character except \n
+// A? matches 0 or 1 occurrences of A
+// A* matches 0 or many occurrences of A
+// A+ matches 1 or many occurrences of A
+// ^ matches the beginning of a string (not that of each line)
+// $ matches the end of a string (not that of each line)
+// xy matches x followed by y
+//
+// If you accidentally use PCRE or POSIX extended regex features
+// not implemented by us, you will get a run-time failure. In that
+// case, please try to rewrite your regular expression within the
+// above syntax.
+//
+// This implementation is *not* meant to be as highly tuned or robust
+// as a compiled regex library, but should perform well enough for a
+// death test, which already incurs significant overhead by launching
+// a child process.
+//
+// Known caveats:
+//
+// A "threadsafe" style death test obtains the path to the test
+// program from argv[0] and re-executes it in the sub-process. For
+// simplicity, the current implementation doesn't search the PATH
+// when launching the sub-process. This means that the user must
+// invoke the test program via a path that contains at least one
+// path separator (e.g. path/to/foo_test and
+// /absolute/path/to/bar_test are fine, but foo_test is not). This
+// is rarely a problem as people usually don't put the test binary
+// directory in PATH.
+//
+// TODO(wan@google.com): make thread-safe death tests search the PATH.
+
+// Asserts that a given statement causes the program to exit, with an
+// integer exit status that satisfies predicate, and emitting error output
+// that matches regex.
+# define ASSERT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
+
+// Like ASSERT_EXIT, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
+
+// Asserts that a given statement causes the program to exit, either by
+// explicitly exiting with a nonzero exit code or being killed by a
+// signal, and emitting error output that matches regex.
+// (Implemented as ASSERT_EXIT with the ExitedUnsuccessfully predicate.)
+# define ASSERT_DEATH(statement, regex) \
+ ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Like ASSERT_DEATH, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_DEATH(statement, regex) \
+ EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
+
+// Tests that an exit code describes a normal exit with a given exit code.
+class GTEST_API_ ExitedWithCode {
+ public:
+ explicit ExitedWithCode(int exit_code);
+ // Predicate call: true iff exit_status is a normal exit with exit_code_.
+ bool operator()(int exit_status) const;
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ExitedWithCode& other);
+
+ const int exit_code_;
+};
+
+# if !GTEST_OS_WINDOWS
+// Tests that an exit code describes an exit due to termination by a
+// given signal.
+class GTEST_API_ KilledBySignal {
+ public:
+ explicit KilledBySignal(int signum);
+ bool operator()(int exit_status) const;
+ private:
+ const int signum_;
+};
+# endif // !GTEST_OS_WINDOWS
+
+// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
+// The death testing framework causes this to have interesting semantics,
+// since the sideeffects of the call are only visible in opt mode, and not
+// in debug mode.
+//
+// In practice, this can be used to test functions that utilize the
+// LOG(DFATAL) macro using the following style:
+//
+// int DieInDebugOr12(int* sideeffect) {
+// if (sideeffect) {
+// *sideeffect = 12;
+// }
+// LOG(DFATAL) << "death";
+// return 12;
+// }
+//
+// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) {
+// int sideeffect = 0;
+// // Only asserts in dbg.
+// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
+//
+// #ifdef NDEBUG
+// // opt-mode has sideeffect visible.
+// EXPECT_EQ(12, sideeffect);
+// #else
+// // dbg-mode no visible sideeffect.
+// EXPECT_EQ(0, sideeffect);
+// #endif
+// }
+//
+// This will assert that DieInDebugReturn12InOpt() crashes in debug
+// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
+// appropriate fallback value (12 in this case) in opt mode. If you
+// need to test that a function has appropriate side-effects in opt
+// mode, include assertions against the side-effects. A general
+// pattern for this is:
+//
+// EXPECT_DEBUG_DEATH({
+// // Side-effects here will have an effect after this statement in
+// // opt mode, but none in debug mode.
+// EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
+// }, "death");
+//
+# ifdef NDEBUG
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+# else
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+
+# endif // NDEBUG for EXPECT_DEBUG_DEATH
+#endif // GTEST_HAS_DEATH_TEST
+
+// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
+// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
+// death tests are supported; otherwise they just issue a warning. This is
+// useful when you are combining death test assertions with normal test
+// assertions in one test.
+#if GTEST_HAS_DEATH_TEST
+// Death tests supported: identical to EXPECT_DEATH/ASSERT_DEATH.
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+    EXPECT_DEATH(statement, regex)
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+    ASSERT_DEATH(statement, regex)
+#else
+// Not supported: issue a warning instead of failing (see comment above).
+// The third argument differs — empty for EXPECT vs. `return` for ASSERT —
+// presumably the statement run after warning, mirroring continue-vs-abort
+// semantics; confirm against GTEST_UNSUPPORTED_DEATH_TEST_'s definition.
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+    GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)
+#endif
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+// This file was GENERATED by command:
+// pump.py gtest-param-test.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+ // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think.
+
+TEST_P(FooTest, DoesBlah) {
+ // Inside a test, access the test parameter with the GetParam() method
+ // of the TestWithParam<T> class:
+ EXPECT_TRUE(foo.Blah(GetParam()));
+ ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+ ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+// Range(begin, end [, step]) - Yields values {begin, begin+step,
+// begin+step+step, ...}. The values do not
+// include end. step defaults to 1.
+// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}.
+// ValuesIn(container) - Yields values from a C-style array, an STL
+// ValuesIn(begin,end) container, or an iterator range [begin, end).
+// Bool() - Yields sequence {false, true}.
+// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product
+// for the math savvy) of the values generated
+// by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+ FooTest,
+ Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern, (yes, you
+// can instantiate it more than once) the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user on one hand, to adjust generator parameters in order
+// to dynamically determine a set of tests to run and on the other hand,
+// give the user a chance to inspect the generated tests with Google Test
+// reflection API before RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+ // You can inherit all the usual members for a non-parameterized test
+ // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+ // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+ // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+ // GetParam works just the same here as if you inherit from TestWithParam.
+ EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif // 0
+
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (8 bytes instead of 4).
+// - Every time a pointer is assigned, the entire list of pointers to that
+// object is traversed. This class is therefore NOT SUITABLE when there
+// will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+// will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Bill Gibbons suggested we use something like this.
+//
+// Thread Safety:
+// Unlike other linked_ptr implementations, in this implementation
+// a linked_ptr object is thread-safe in the sense that:
+// - it's safe to copy linked_ptr objects concurrently,
+// - it's safe to copy *from* a linked_ptr and read its underlying
+// raw pointer (e.g. via get()) concurrently, and
+// - it's safe to write to two linked_ptrs that point to the same
+// shared object concurrently.
+// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
+// confusion with normal linked_ptr.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#include <stdlib.h>
+#include <assert.h>
+
+
+namespace testing {
+namespace internal {
+
+// Protects copying of all linked_ptr objects.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// So, it needs to be possible for different types of linked_ptr to participate
+// in the same circular linked list, so we need a single class type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+  // Create a new circle that includes only this instance.
+  void join_new() {
+    next_ = this;
+  }
+
+  // Many linked_ptr operations may change p.link_ for some linked_ptr
+  // variable p in the same circle as this object.  Therefore we need
+  // to prevent two such operations from occurring concurrently.
+  //
+  // Note that different types of linked_ptr objects can coexist in a
+  // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
+  // linked_ptr<Derived2>).  Therefore we must use a single mutex to
+  // protect all linked_ptr objects.  This can create serious
+  // contention in production code, but is acceptable in a testing
+  // framework.
+
+  // Join an existing circle.
+  // Walks the circle starting at *ptr to find its last node, then splices
+  // this object in just before ptr — O(circle size), under the global mutex.
+  void join(linked_ptr_internal const* ptr)
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    linked_ptr_internal const* p = ptr;
+    while (p->next_ != ptr) p = p->next_;
+    p->next_ = this;
+    next_ = ptr;
+  }
+
+  // Leave whatever circle we're part of.  Returns true if we were the
+  // last member of the circle.  Once this is done, you can join() another.
+  // (linked_ptr deletes the pointee exactly when this returns true.)
+  bool depart()
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    if (next_ == this) return true;
+    linked_ptr_internal const* p = next_;
+    while (p->next_ != this) p = p->next_;
+    p->next_ = next_;
+    return false;
+  }
+
+ private:
+  // Next node in the circular singly-linked list of all pointers that share
+  // one pointee.  mutable: join()/depart() relink through const pointers.
+  mutable linked_ptr_internal const* next_;
+};
+
+template <typename T>
+class linked_ptr {
+ public:
+  typedef T element_type;
+
+  // Take over ownership of a raw pointer.  This should happen as soon as
+  // possible after the object is created.
+  explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+  ~linked_ptr() { depart(); }
+
+  // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+  template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+  linked_ptr(linked_ptr const& ptr) {  // NOLINT
+    assert(&ptr != this);
+    copy(&ptr);
+  }
+
+  // Assignment releases the old value and acquires the new.
+  template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+    depart();
+    copy(&ptr);
+    return *this;
+  }
+
+  linked_ptr& operator=(linked_ptr const& ptr) {
+    // Self-assignment guard: depart() first could delete our own pointee.
+    if (&ptr != this) {
+      depart();
+      copy(&ptr);
+    }
+    return *this;
+  }
+
+  // Smart pointer members.
+  // Releases the current pointee (deleting it if we held the last
+  // reference) and takes ownership of ptr in a fresh circle.
+  void reset(T* ptr = NULL) {
+    depart();
+    capture(ptr);
+  }
+  T* get() const { return value_; }
+  T* operator->() const { return value_; }
+  T& operator*() const { return *value_; }
+
+  // Comparisons are on the stored raw pointer, not on circle identity.
+  bool operator==(T* p) const { return value_ == p; }
+  bool operator!=(T* p) const { return value_ != p; }
+  template <typename U>
+  bool operator==(linked_ptr<U> const& ptr) const {
+    return value_ == ptr.get();
+  }
+  template <typename U>
+  bool operator!=(linked_ptr<U> const& ptr) const {
+    return value_ != ptr.get();
+  }
+
+ private:
+  // Lets copy() reach link_ of a linked_ptr with a different type argument.
+  template <typename U>
+  friend class linked_ptr;
+
+  T* value_;                  // The owned raw pointer (may be NULL).
+  linked_ptr_internal link_;  // Our node in the circle of co-owners.
+
+  // Leaves the current circle; deletes the pointee iff this was the
+  // last reference to it.
+  void depart() {
+    if (link_.depart()) delete value_;
+  }
+
+  // Takes ownership of ptr in a new single-member circle.
+  void capture(T* ptr) {
+    value_ = ptr;
+    link_.join_new();
+  }
+
+  // Shares ptr's pointee: joins its circle when the pointee is non-NULL;
+  // NULL holders each keep their own fresh circle (nothing to co-own).
+  template <typename U> void copy(linked_ptr<U> const* ptr) {
+    value_ = ptr->get();
+    if (value_)
+      link_.join(&ptr->link_);
+    else
+      link_.join_new();
+  }
+};
+
+// Symmetric overload so `raw == smart` compiles as well as `smart == raw`.
+template <typename T>
+inline bool operator==(T* ptr, const linked_ptr<T>& x) {
+  return x.get() == ptr;
+}
+
+// Symmetric overload so `raw != smart` compiles as well as `smart != raw`.
+template <typename T>
+inline bool operator!=(T* ptr, const linked_ptr<T>& x) {
+  return x.get() != ptr;
+}
+
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+  // Named local instead of a temporary; behavior is identical.
+  linked_ptr<T> owner(ptr);
+  return owner;
+}
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// A user can teach this function how to print a class type T by
+// defining either operator<<() or PrintTo() in the namespace that
+// defines T. More specifically, the FIRST defined function in the
+// following list will be used (assuming T is defined in namespace
+// foo):
+//
+// 1. foo::PrintTo(const T&, ostream*)
+// 2. operator<<(ostream&, const T&) defined in either foo or the
+// global namespace.
+//
+// If none of the above is defined, it will print the debug string of
+// the value if it is a protocol buffer, or print the raw bytes in the
+// value otherwise.
+//
+// To aid debugging: when T is a reference type, the address of the
+// value is also printed; when T is a (const) char pointer, both the
+// pointer value and the NUL-terminated string it points to are
+// printed.
+//
+// We also provide some convenient wrappers:
+//
+// // Prints a value to a string. For a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// std::string ::testing::PrintToString(const T& value);
+//
+// // Prints a value tersely: for a reference type, the referenced
+// // value (but not the address) is printed; for a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
+//
+// // Prints value using the type inferred by the compiler. The difference
+// // from UniversalTersePrint() is that this function prints both the
+// // pointer and the NUL-terminated string for a (const or not) char pointer.
+// void ::testing::internal::UniversalPrint(const T& value, ostream*);
+//
+// // Prints the fields of a tuple tersely to a string vector, one
+// // element for each field. Tuple support must be enabled in
+// // gtest-port.h.
+// std::vector<string> UniversalTersePrintTupleFieldsToStrings(
+// const Tuple& value);
+//
+// Known limitation:
+//
+// The print primitives print the elements of an STL-style container
+// using the compiler-inferred type of *iter where iter is a
+// const_iterator of the container. When const_iterator is an input
+// iterator but not a forward iterator, this inferred type may not
+// match value_type, and the print output may be incorrect. In
+// practice, this is rarely a problem as for most containers
+// const_iterator is a forward iterator. We'll fix this if there's an
+// actual need for it. Note that this fix cannot rely on value_type
+// being defined as many user-defined container types don't have
+// value_type.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#include <ostream> // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace testing {
+
+// Definitions in the 'internal' and 'internal2' name spaces are
+// subject to change without notice. DO NOT USE THEM IN USER CODE!
+namespace internal2 {
+
+// Prints the given number of bytes in the given object to the given
+// ostream.
+GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
+ size_t count,
+ ::std::ostream* os);
+
+// For selecting which printer to use when a given type has neither <<
+// nor PrintTo().
+enum TypeKind {
+ kProtobuf, // a protobuf type
+ kConvertibleToInteger, // a type implicitly convertible to BiggestInt
+ // (e.g. a named or unnamed enum type)
+ kOtherType // anything else
+};
+
+// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called
+// by the universal printer to print a value of type T when neither
+// operator<< nor PrintTo() is defined for T, where kTypeKind is the
+// "kind" of T as defined by enum TypeKind.
+template <typename T, TypeKind kTypeKind>
+class TypeWithoutFormatter {
+ public:
+  // Primary template, selected for kOtherType: with no user-provided
+  // printer and no integer conversion, dump the object's raw bytes.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const unsigned char* const bytes =
+        reinterpret_cast<const unsigned char*>(&value);
+    PrintBytesInObjectTo(bytes, sizeof(value), os);
+  }
+};
+
+// Protobufs whose ShortDebugString() is at most this many characters are
+// printed on a single line; longer ones use the multi-line DebugString()
+// for better readability.
+const size_t kProtobufOneLinerMaxLength = 50;
+
+template <typename T>
+class TypeWithoutFormatter<T, kProtobuf> {
+ public:
+  // Prints a protobuf wrapped in <...>, choosing the one-line or
+  // multi-line debug representation by length.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const ::testing::internal::string one_liner = value.ShortDebugString();
+    if (one_liner.length() <= kProtobufOneLinerMaxLength) {
+      *os << ("<" + one_liner + ">");
+    } else {
+      *os << ("<" + ("\n" + value.DebugString()) + ">");
+    }
+  }
+};
+
+template <typename T>
+class TypeWithoutFormatter<T, kConvertibleToInteger> {
+ public:
+  // T has no << operator or PrintTo(), but converts implicitly to
+  // BiggestInt — most likely a (possibly unnamed) enum — so printing the
+  // converted integer is the best available representation.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const internal::BiggestInt as_int = value;
+    *os << as_int;
+  }
+};
+
+// Prints the given value to the given ostream. If the value is a
+// protocol message, its debug string is printed; if it's an enum or
+// of a type implicitly convertible to BiggestInt, it's printed as an
+// integer; otherwise the bytes in the value are printed. This is
+// what UniversalPrinter<T>::Print() does when it knows nothing about
+// type T and T has neither << operator nor PrintTo().
+//
+// A user can override this behavior for a class type Foo by defining
+// a << operator in the namespace where Foo is defined.
+//
+// We put this operator in namespace 'internal2' instead of 'internal'
+// to simplify the implementation, as much code in 'internal' needs to
+// use << in STL, which would conflict with our own << were it defined
+// in 'internal'.
+//
+// Note that this operator<< takes a generic std::basic_ostream<Char,
+// CharTraits> type instead of the more restricted std::ostream. If
+// we define it to take an std::ostream instead, we'll get an
+// "ambiguous overloads" compiler error when trying to print a type
+// Foo that supports streaming to std::basic_ostream<Char,
+// CharTraits>, as the compiler cannot tell whether
+// operator<<(std::ostream&, const T&) or
+// operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more
+// specific.
+template <typename Char, typename CharTraits, typename T>
+::std::basic_ostream<Char, CharTraits>& operator<<(
+    ::std::basic_ostream<Char, CharTraits>& os, const T& x) {
+  // The formatter is selected at compile time: protobuf first, then
+  // integer-convertible, then the raw-byte fallback (see TypeKind above).
+  TypeWithoutFormatter<T,
+      (internal::IsAProtocolMessage<T>::value ? kProtobuf :
+       internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?
+       kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);
+  return os;
+}
+
+} // namespace internal2
+} // namespace testing
+
+// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
+// magic needed for implementing UniversalPrinter won't work.
+namespace testing_internal {
+
+// Used to print a value that is not an STL-style container when the
+// user doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
+  // With the following statement, during unqualified name lookup,
+  // testing::internal2::operator<< appears as if it was declared in
+  // the nearest enclosing namespace that contains both
+  // ::testing_internal and ::testing::internal2, i.e. the global
+  // namespace.  For more details, refer to the C++ Standard section
+  // 7.3.4-1 [namespace.udir].  This allows us to fall back onto
+  // testing::internal2::operator<< in case T doesn't come with a <<
+  // operator.
+  //
+  // We cannot write 'using ::testing::internal2::operator<<;', which
+  // gcc 3.3 fails to compile due to a compiler bug.
+  using namespace ::testing::internal2;  // NOLINT
+
+  // Assuming T is defined in namespace foo, in the next statement,
+  // the compiler will consider all of:
+  //
+  //   1. foo::operator<< (thanks to Koenig look-up),
+  //   2. ::operator<< (as the current namespace is enclosed in ::),
+  //   3. testing::internal2::operator<< (thanks to the using statement above).
+  //
+  // The operator<< whose type matches T best will be picked.
+  //
+  // We deliberately allow #2 to be a candidate, as sometimes it's
+  // impossible to define #1 (e.g. when foo is ::std, defining
+  // anything in it is undefined behavior unless you are a compiler
+  // vendor.).
+  // os must be non-NULL: it is dereferenced unconditionally here.
+  *os << value;
+}
+
+} // namespace testing_internal
+
+namespace testing {
+namespace internal {
+
+// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
+// value to the given ostream. The caller must ensure that
+// 'ostream_ptr' is not NULL, or the behavior is undefined.
+//
+// We define UniversalPrinter as a class template (as opposed to a
+// function template), as we need to partially specialize it for
+// reference types, which cannot be done with function templates.
+template <typename T>
+class UniversalPrinter;
+
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os);
+
+// Used to print an STL-style container when the user doesn't define
+// a PrintTo() for it.
+template <typename C>
+void DefaultPrintTo(IsContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const C& container, ::std::ostream* os) {
+  const size_t kMaxCount = 32;  // The maximum number of elements to print.
+  *os << '{';
+  size_t count = 0;
+  for (typename C::const_iterator it = container.begin();
+       it != container.end(); ++it, ++count) {
+    if (count > 0) {
+      *os << ',';
+      // The comma precedes the truncation check so truncated output reads
+      // "{ e1, e2, ... }".
+      if (count == kMaxCount) {  // Enough has been printed.
+        *os << " ...";
+        break;
+      }
+    }
+    *os << ' ';
+    // We cannot call PrintTo(*it, os) here as PrintTo() doesn't
+    // handle *it being a native array.
+    internal::UniversalPrint(*it, os);
+  }
+
+  if (count > 0) {
+    // Closing space only for non-empty output: "{}" vs. "{ e1 }".
+    *os << ' ';
+  }
+  *os << '}';
+}
+
+// Used to print a pointer that is neither a char pointer nor a member
+// pointer, when the user doesn't define PrintTo() for it. (A member
+// variable pointer or member function pointer doesn't really point to
+// a location in the address space. Their representation is
+// implementation-defined. Therefore they will be printed as raw
+// bytes.)
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    true_type /* is a pointer */,
+                    T* p, ::std::ostream* os) {
+  // A NULL pointer of any type is printed as the literal string "NULL".
+  if (p == NULL) {
+    *os << "NULL";
+  } else {
+    // C++ doesn't allow casting from a function pointer to any object
+    // pointer.
+    //
+    // IsTrue() silences warnings: "Condition is always true",
+    // "unreachable code".
+    if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {
+      // T is not a function type.  We just call << to print p,
+      // relying on ADL to pick up user-defined << for their pointer
+      // types, if any.
+      *os << p;
+    } else {
+      // T is a function type, so '*os << p' doesn't do what we want
+      // (it just prints p as bool).  We want to print p as a const
+      // void*.  However, we cannot cast it to const void* directly,
+      // even using reinterpret_cast, as earlier versions of gcc
+      // (e.g. 3.4.5) cannot compile the cast when p is a function
+      // pointer.  Casting to UInt64 first solves the problem.
+      *os << reinterpret_cast<const void*>(
+          reinterpret_cast<internal::UInt64>(p));
+    }
+  }
+}
+
+// Used to print a non-container, non-pointer value when the user
+// doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const T& value, ::std::ostream* os) {
+  // Delegates to the helper in ::testing_internal (a namespace deliberately
+  // NOT nested in ::testing) so the operator<< fallback in
+  // testing::internal2 can be found via a using-directive there.
+  ::testing_internal::DefaultPrintNonContainerTo(value, os);
+}
+
// Prints the given value using the << operator if it has one;
// otherwise prints the bytes in it.  This is what
// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
// or overloaded for type T.
//
// A user can override this behavior for a class type Foo by defining
// an overload of PrintTo() in the namespace where Foo is defined.  We
// give the user this option as sometimes defining a << operator for
// Foo is not desirable (e.g. the coding style may prevent doing it,
// or there is already a << operator but it doesn't do what the user
// wants).
template <typename T>
void PrintTo(const T& value, ::std::ostream* os) {
  // DefaultPrintTo() is overloaded.  The type of its first two
  // arguments determine which version will be picked.  If T is an
  // STL-style container, the version for container will be called; if
  // T is a pointer, the pointer version will be called; otherwise the
  // generic version will be called.
  //
  // Note that we check for container types here, before we check
  // for protocol message types in our operator<<.  The rationale is:
  //
  // For protocol messages, we want to give people a chance to
  // override Google Mock's format by defining a PrintTo() or
  // operator<<.  For STL containers, other formats can be
  // incompatible with Google Mock's format for the container
  // elements; therefore we check for container types here to ensure
  // that our format is used.
  //
  // The second argument of DefaultPrintTo() is needed to bypass a bug
  // in Symbian's C++ compiler that prevents it from picking the right
  // overload between:
  //
  //   PrintTo(const T& x, ...);
  //   PrintTo(T* x, ...);
  DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);
}
+
// The following list of PrintTo() overloads tells
// UniversalPrinter<T>::Print() how to print standard types (built-in
// types, strings, plain arrays, and pointers).

// Overloads for various char types (declared here; defined in the
// gtest implementation file).
GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
inline void PrintTo(char c, ::std::ostream* os) {
  // When printing a plain char, we always treat it as unsigned.  This
  // way, the output won't be affected by whether the compiler thinks
  // char is signed or not.
  PrintTo(static_cast<unsigned char>(c), os);
}
+
// Overload for bool: prints the keywords "true"/"false" instead of
// the default 1/0.
inline void PrintTo(bool x, ::std::ostream* os) {
  if (x) {
    *os << "true";
  } else {
    *os << "false";
  }
}
+
// Overload for wchar_t type.
// Prints a wchar_t as a symbol if it is printable or as its internal
// code otherwise and also as its decimal code (except for L'\0').
// The L'\0' char is printed as "L'\\0'".  The decimal code is printed
// as signed integer when wchar_t is implemented by the compiler
// as a signed type and is printed as an unsigned integer when wchar_t
// is implemented as an unsigned type.
GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);

// Overloads for C strings.  A char pointer is printed as a quoted,
// escaped string by the out-of-line overload; the char* form simply
// adds const and forwards.
GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
inline void PrintTo(char* s, ::std::ostream* os) {
  PrintTo(ImplicitCast_<const char*>(s), os);
}
+
// signed/unsigned char is often used for representing binary data, so
// we print pointers to it as void* to be safe: treating them as
// NUL-terminated strings could read past the end of the buffer.
inline void PrintTo(const signed char* s, ::std::ostream* os) {
  PrintTo(ImplicitCast_<const void*>(s), os);
}
inline void PrintTo(signed char* s, ::std::ostream* os) {
  PrintTo(ImplicitCast_<const void*>(s), os);
}
inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
  PrintTo(ImplicitCast_<const void*>(s), os);
}
inline void PrintTo(unsigned char* s, ::std::ostream* os) {
  PrintTo(ImplicitCast_<const void*>(s), os);
}
+
// MSVC can be configured to define wchar_t as a typedef of unsigned
// short.  It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native
// type.  When wchar_t is a typedef, defining an overload for const
// wchar_t* would cause unsigned short* be printed as a wide string,
// possibly causing invalid memory accesses.
#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
// Overloads for wide C strings.
GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);
inline void PrintTo(wchar_t* s, ::std::ostream* os) {
  PrintTo(ImplicitCast_<const wchar_t*>(s), os);
}
#endif
+
// Overload for C arrays.  Multi-dimensional arrays are printed
// properly.

// Prints the given number of elements in an array, without printing
// the curly braces.  Elements are separated by ", ".
//
// 'a' must point to at least 'count' valid elements.  A count of 0
// prints nothing; the previous implementation unconditionally read
// a[0], which is out of bounds for an empty range.
template <typename T>
void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
  if (count == 0) return;  // Nothing to print; avoid reading a[0].
  UniversalPrint(a[0], os);
  for (size_t i = 1; i != count; i++) {
    *os << ", ";
    UniversalPrint(a[i], os);
  }
}
+
// Overloads for ::string and ::std::string.  Both delegate to
// PrintStringTo, which is defined out of line.
#if GTEST_HAS_GLOBAL_STRING
GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os);
inline void PrintTo(const ::string& s, ::std::ostream* os) {
  PrintStringTo(s, os);
}
#endif  // GTEST_HAS_GLOBAL_STRING

GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os);
inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
  PrintStringTo(s, os);
}
+
// Overloads for ::wstring and ::std::wstring.  Both delegate to
// PrintWideStringTo, which is defined out of line.
#if GTEST_HAS_GLOBAL_WSTRING
GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os);
inline void PrintTo(const ::wstring& s, ::std::ostream* os) {
  PrintWideStringTo(s, os);
}
#endif  // GTEST_HAS_GLOBAL_WSTRING

#if GTEST_HAS_STD_WSTRING
GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os);
inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
  PrintWideStringTo(s, os);
}
#endif  // GTEST_HAS_STD_WSTRING
+
#if GTEST_HAS_TR1_TUPLE
// Overload for ::std::tr1::tuple.  Needed for printing function arguments,
// which are packed as tuples.

// Helper function for printing a tuple.  T must be instantiated with
// a tuple type.  Defined further below, after TuplePrefixPrinter.
template <typename T>
void PrintTupleTo(const T& t, ::std::ostream* os);

// Overloaded PrintTo() for tuples of various arities.  We support
// tuples of up-to 10 fields.  The following implementation works
// regardless of whether tr1::tuple is implemented using the
// non-standard variadic template feature or not.  Each overload
// simply forwards to PrintTupleTo().

inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1>
void PrintTo(const ::std::tr1::tuple<T1>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2>
void PrintTo(const ::std::tr1::tuple<T1, T2>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4>& t, ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7, typename T8>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7, typename T8, typename T9>
void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9>& t,
             ::std::ostream* os) {
  PrintTupleTo(t, os);
}

template <typename T1, typename T2, typename T3, typename T4, typename T5,
          typename T6, typename T7, typename T8, typename T9, typename T10>
void PrintTo(
    const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& t,
    ::std::ostream* os) {
  PrintTupleTo(t, os);
}
#endif  // GTEST_HAS_TR1_TUPLE
+
+// Overload for std::pair.
+template <typename T1, typename T2>
+void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
+ *os << '(';
+ // We cannot use UniversalPrint(value.first, os) here, as T1 may be
+ // a reference type. The same for printing value.second.
+ UniversalPrinter<T1>::Print(value.first, os);
+ *os << ", ";
+ UniversalPrinter<T2>::Print(value.second, os);
+ *os << ')';
+}
+
// Implements printing a non-reference type T by letting the compiler
// pick the right overload of PrintTo() for T.
template <typename T>
class UniversalPrinter {
 public:
  // MSVC warns about adding const to a function type, so we want to
  // disable the warning.
#ifdef _MSC_VER
# pragma warning(push)          // Saves the current warning state.
# pragma warning(disable:4180)  // Temporarily disables warning 4180.
#endif  // _MSC_VER

  // Note: we deliberately don't call this PrintTo(), as that name
  // conflicts with ::testing::internal::PrintTo in the body of the
  // function.
  static void Print(const T& value, ::std::ostream* os) {
    // By default, ::testing::internal::PrintTo() is used for printing
    // the value.
    //
    // Thanks to Koenig look-up (argument-dependent lookup), if T is a
    // class and has its own PrintTo() function defined in its
    // namespace, that function will be visible here.  Since it is more
    // specific than the generic ones in ::testing::internal, it will
    // be picked by the compiler in the following statement - exactly
    // what we want.
    PrintTo(value, os);
  }

#ifdef _MSC_VER
# pragma warning(pop)           // Restores the warning state.
#endif  // _MSC_VER
};
+
// UniversalPrintArray(begin, len, os) prints an array of 'len'
// elements, starting at address 'begin', wrapped in "{ ... }".
template <typename T>
void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
  if (len == 0) {
    *os << "{}";
    return;
  }
  *os << "{ ";
  const size_t kThreshold = 18;
  const size_t kChunkSize = 8;
  if (len <= kThreshold) {
    // Small enough: print every element.
    PrintRawArrayTo(begin, len, os);
  } else {
    // The array has more than kThreshold elements, so omit the middle
    // by printing only the first and the last kChunkSize elements.
    // TODO(wan@google.com): let the user control the threshold using a flag.
    PrintRawArrayTo(begin, kChunkSize, os);
    *os << ", ..., ";
    PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
  }
  *os << " }";
}
// This overload prints a (const) char array compactly (declared here;
// defined in the gtest implementation file).
GTEST_API_ void UniversalPrintArray(
    const char* begin, size_t len, ::std::ostream* os);

// This overload prints a (const) wchar_t array compactly.
GTEST_API_ void UniversalPrintArray(
    const wchar_t* begin, size_t len, ::std::ostream* os);
+
// Implements printing an array type T[N] by delegating to
// UniversalPrintArray, which knows the element count statically.
template <typename T, size_t N>
class UniversalPrinter<T[N]> {
 public:
  // Prints the given array, omitting some elements when there are too
  // many (see UniversalPrintArray above for the truncation policy).
  static void Print(const T (&a)[N], ::std::ostream* os) {
    UniversalPrintArray(a, N, os);
  }
};
+
// Implements printing a reference type T&: prints the referent's
// address followed by its value.
template <typename T>
class UniversalPrinter<T&> {
 public:
  // MSVC warns about adding const to a function type, so we want to
  // disable the warning.
#ifdef _MSC_VER
# pragma warning(push)          // Saves the current warning state.
# pragma warning(disable:4180)  // Temporarily disables warning 4180.
#endif  // _MSC_VER

  static void Print(const T& value, ::std::ostream* os) {
    // Prints the address of the value.  We use reinterpret_cast here
    // as static_cast doesn't compile when T is a function type.
    *os << "@" << reinterpret_cast<const void*>(&value) << " ";

    // Then prints the value itself.
    UniversalPrint(value, os);
  }

#ifdef _MSC_VER
# pragma warning(pop)           // Restores the warning state.
#endif  // _MSC_VER
};
+
// Prints a value tersely: for a reference type, the referenced value
// (but not the address) is printed; for a (const) char pointer, the
// NUL-terminated string (but not the pointer) is printed.

// Primary template: no special handling; defers to UniversalPrint.
template <typename T>
class UniversalTersePrinter {
 public:
  static void Print(const T& value, ::std::ostream* os) {
    UniversalPrint(value, os);
  }
};
// Reference types: strip the reference so the address is not printed.
template <typename T>
class UniversalTersePrinter<T&> {
 public:
  static void Print(const T& value, ::std::ostream* os) {
    UniversalPrint(value, os);
  }
};
// Arrays: print via the array-aware UniversalPrinter.
template <typename T, size_t N>
class UniversalTersePrinter<T[N]> {
 public:
  static void Print(const T (&value)[N], ::std::ostream* os) {
    UniversalPrinter<T[N]>::Print(value, os);
  }
};
// C strings: print the pointed-to text (or "NULL"), not the pointer.
template <>
class UniversalTersePrinter<const char*> {
 public:
  static void Print(const char* str, ::std::ostream* os) {
    if (str == NULL) {
      *os << "NULL";
    } else {
      UniversalPrint(string(str), os);
    }
  }
};
template <>
class UniversalTersePrinter<char*> {
 public:
  static void Print(char* str, ::std::ostream* os) {
    UniversalTersePrinter<const char*>::Print(str, os);
  }
};
+
#if GTEST_HAS_STD_WSTRING
// Wide C strings: print the pointed-to text (or "NULL"), not the
// pointer.  Only available when std::wstring is supported.
template <>
class UniversalTersePrinter<const wchar_t*> {
 public:
  static void Print(const wchar_t* str, ::std::ostream* os) {
    if (str == NULL) {
      *os << "NULL";
    } else {
      UniversalPrint(::std::wstring(str), os);
    }
  }
};
#endif

template <>
class UniversalTersePrinter<wchar_t*> {
 public:
  static void Print(wchar_t* str, ::std::ostream* os) {
    UniversalTersePrinter<const wchar_t*>::Print(str, os);
  }
};
+
// Convenience wrapper: tersely prints 'value' to '*os' (see the
// comment before UniversalTersePrinter for what "tersely" means).
template <typename T>
void UniversalTersePrint(const T& value, ::std::ostream* os) {
  UniversalTersePrinter<T>::Print(value, os);
}
+
// Prints a value using the type inferred by the compiler.  The
// difference between this and UniversalTersePrint() is that for a
// (const) char pointer, this prints both the pointer and the
// NUL-terminated string.
template <typename T>
void UniversalPrint(const T& value, ::std::ostream* os) {
  // A workaround for the bug in VC++ 7.1 that prevents us from instantiating
  // UniversalPrinter with T directly.
  typedef T T1;
  UniversalPrinter<T1>::Print(value, os);
}
+
#if GTEST_HAS_TR1_TUPLE
// A list of strings, one per tersely-printed tuple field.
typedef ::std::vector<string> Strings;

// This helper template allows PrintTo() for tuples and
// UniversalTersePrintTupleFieldsToStrings() to be defined by
// induction on the number of tuple fields.  The idea is that
// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
// fields in tuple t, and can be defined in terms of
// TuplePrefixPrinter<N - 1>.

// The inductive case.
template <size_t N>
struct TuplePrefixPrinter {
  // Prints the first N fields of a tuple: recursively prints the
  // first N - 1 fields, then ", ", then field N - 1.
  template <typename Tuple>
  static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
    TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
    *os << ", ";
    // UniversalPrinter is instantiated with the exact element type so
    // that reference-typed fields are handled correctly.
    UniversalPrinter<typename ::std::tr1::tuple_element<N - 1, Tuple>::type>
        ::Print(::std::tr1::get<N - 1>(t), os);
  }

  // Tersely prints the first N fields of a tuple to a string vector,
  // one element for each field.
  template <typename Tuple>
  static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
    TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
    ::std::stringstream ss;
    UniversalTersePrint(::std::tr1::get<N - 1>(t), &ss);
    strings->push_back(ss.str());
  }
};

// Base cases: an empty prefix prints nothing.
template <>
struct TuplePrefixPrinter<0> {
  template <typename Tuple>
  static void PrintPrefixTo(const Tuple&, ::std::ostream*) {}

  template <typename Tuple>
  static void TersePrintPrefixToStrings(const Tuple&, Strings*) {}
};
// We have to specialize the entire TuplePrefixPrinter<> class
// template here, even though the definition of
// TersePrintPrefixToStrings() is the same as the generic version, as
// Embarcadero (formerly CodeGear, formerly Borland) C++ doesn't
// support specializing a method template of a class template.
// The N == 1 case also differs from the inductive case in that no
// ", " separator is printed before the first field.
template <>
struct TuplePrefixPrinter<1> {
  template <typename Tuple>
  static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
    UniversalPrinter<typename ::std::tr1::tuple_element<0, Tuple>::type>::
        Print(::std::tr1::get<0>(t), os);
  }

  template <typename Tuple>
  static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
    ::std::stringstream ss;
    UniversalTersePrint(::std::tr1::get<0>(t), &ss);
    strings->push_back(ss.str());
  }
};

// Helper function for printing a tuple.  T must be instantiated with
// a tuple type.  Prints all fields in parentheses, e.g. "(1, true)".
template <typename T>
void PrintTupleTo(const T& t, ::std::ostream* os) {
  *os << "(";
  TuplePrefixPrinter< ::std::tr1::tuple_size<T>::value>::
      PrintPrefixTo(t, os);
  *os << ")";
}

// Prints the fields of a tuple tersely to a string vector, one
// element for each field.  See the comment before
// UniversalTersePrint() for how we define "tersely".
template <typename Tuple>
Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
  Strings result;
  TuplePrefixPrinter< ::std::tr1::tuple_size<Tuple>::value>::
      TersePrintPrefixToStrings(value, &result);
  return result;
}
#endif  // GTEST_HAS_TR1_TUPLE
+
+} // namespace internal
+
// Public entry point: returns the terse string representation of
// 'value', as produced by UniversalTersePrinter.
template <typename T>
::std::string PrintToString(const T& value) {
  ::std::stringstream ss;
  internal::UniversalTersePrinter<T>::Print(value, &ss);
  return ss.str();
}
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+namespace internal {
+
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Outputs a message explaining invalid registration of different
// fixture class for the same test case.  This may happen when
// TEST_P macro is used to define two tests with the same name
// but in different namespaces.
GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
                                          const char* file, int line);

// Forward declarations used by ParamIterator below.
template <typename> class ParamGeneratorInterface;
template <typename> class ParamGenerator;
+
// Interface for iterating over elements provided by an implementation
// of ParamGeneratorInterface<T>.  Pure virtual; concrete iterators are
// nested inside the generator classes below.
template <typename T>
class ParamIteratorInterface {
 public:
  virtual ~ParamIteratorInterface() {}
  // A pointer to the base generator instance.
  // Used only for the purposes of iterator comparison
  // to make sure that two iterators belong to the same generator.
  virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
  // Advances iterator to point to the next element
  // provided by the generator.  The caller is responsible
  // for not calling Advance() on an iterator equal to
  // BaseGenerator()->End().
  virtual void Advance() = 0;
  // Clones the iterator object.  Used for implementing copy semantics
  // of ParamIterator<T>.
  virtual ParamIteratorInterface* Clone() const = 0;
  // Dereferences the current iterator and provides (read-only) access
  // to the pointed value.  It is the caller's responsibility not to call
  // Current() on an iterator equal to BaseGenerator()->End().
  // Used for implementing ParamGenerator<T>::operator*().
  virtual const T* Current() const = 0;
  // Determines whether the given iterator and other point to the same
  // element in the sequence generated by the generator.
  // Used for implementing ParamGenerator<T>::operator==().
  virtual bool Equals(const ParamIteratorInterface& other) const = 0;
};
+
+// Class iterating over elements provided by an implementation of
+// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
+// and implements the const forward iterator concept.
+template <typename T>
+class ParamIterator {
+ public:
+ typedef T value_type;
+ typedef const T& reference;
+ typedef ptrdiff_t difference_type;
+
+ // ParamIterator assumes ownership of the impl_ pointer.
+ ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
+ ParamIterator& operator=(const ParamIterator& other) {
+ if (this != &other)
+ impl_.reset(other.impl_->Clone());
+ return *this;
+ }
+
+ const T& operator*() const { return *impl_->Current(); }
+ const T* operator->() const { return impl_->Current(); }
+ // Prefix version of operator++.
+ ParamIterator& operator++() {
+ impl_->Advance();
+ return *this;
+ }
+ // Postfix version of operator++.
+ ParamIterator operator++(int /*unused*/) {
+ ParamIteratorInterface<T>* clone = impl_->Clone();
+ impl_->Advance();
+ return ParamIterator(clone);
+ }
+ bool operator==(const ParamIterator& other) const {
+ return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
+ }
+ bool operator!=(const ParamIterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ friend class ParamGenerator<T>;
+ explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
+ scoped_ptr<ParamIteratorInterface<T> > impl_;
+};
+
// ParamGeneratorInterface<T> is the binary interface to access generators
// defined in other translation units.
template <typename T>
class ParamGeneratorInterface {
 public:
  typedef T ParamType;

  virtual ~ParamGeneratorInterface() {}

  // Generator interface definition.  Each call allocates a new
  // iterator; ParamIterator takes ownership of the returned pointer.
  virtual ParamIteratorInterface<T>* Begin() const = 0;
  virtual ParamIteratorInterface<T>* End() const = 0;
};
+
// Wraps ParamGeneratorInterface<T> and provides general generator syntax
// compatible with the STL Container concept.
// This class implements copy initialization semantics and the contained
// ParamGeneratorInterface<T> instance is shared among all copies
// of the original object.  This is possible because that instance is
// immutable (note the linked_ptr holds a const pointee).
template<typename T>
class ParamGenerator {
 public:
  typedef ParamIterator<T> iterator;

  explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
  ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}

  ParamGenerator& operator=(const ParamGenerator& other) {
    impl_ = other.impl_;
    return *this;
  }

  iterator begin() const { return iterator(impl_->Begin()); }
  iterator end() const { return iterator(impl_->End()); }

 private:
  linked_ptr<const ParamGeneratorInterface<T> > impl_;
};
+
// Generates values from a range of two comparable values.  Can be used to
// generate sequences of user-defined types that implement operator+() and
// operator<().
// This class is used in the Range() function.
template <typename T, typename IncrementT>
class RangeGenerator : public ParamGeneratorInterface<T> {
 public:
  RangeGenerator(T begin, T end, IncrementT step)
      : begin_(begin), end_(end),
        step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
  virtual ~RangeGenerator() {}

  virtual ParamIteratorInterface<T>* Begin() const {
    return new Iterator(this, begin_, 0, step_);
  }
  virtual ParamIteratorInterface<T>* End() const {
    return new Iterator(this, end_, end_index_, step_);
  }

 private:
  class Iterator : public ParamIteratorInterface<T> {
   public:
    Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
             IncrementT step)
        : base_(base), value_(value), index_(index), step_(step) {}
    virtual ~Iterator() {}

    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
      return base_;
    }
    virtual void Advance() {
      value_ = value_ + step_;
      index_++;
    }
    virtual ParamIteratorInterface<T>* Clone() const {
      return new Iterator(*this);
    }
    virtual const T* Current() const { return &value_; }
    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
      // Having the same base generator guarantees that the other
      // iterator is of the same type and we can downcast.
      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
          << "The program attempted to compare iterators "
          << "from different generators." << std::endl;
      // Iterators are compared by index rather than by value; this
      // works because every element of the sequence has a unique
      // 0-based index.
      const int other_index =
          CheckedDowncastToActualType<const Iterator>(&other)->index_;
      return index_ == other_index;
    }

   private:
    Iterator(const Iterator& other)
        : ParamIteratorInterface<T>(),
          base_(other.base_), value_(other.value_), index_(other.index_),
          step_(other.step_) {}

    // No implementation - assignment is unsupported.
    void operator=(const Iterator& other);

    const ParamGeneratorInterface<T>* const base_;
    T value_;
    int index_;
    const IncrementT step_;
  };  // class RangeGenerator::Iterator

  // Walks the range once to count its elements; the count becomes the
  // index of the end() iterator.
  static int CalculateEndIndex(const T& begin,
                               const T& end,
                               const IncrementT& step) {
    int end_index = 0;
    for (T i = begin; i < end; i = i + step)
      end_index++;
    return end_index;
  }

  // No implementation - assignment is unsupported.
  void operator=(const RangeGenerator& other);

  const T begin_;
  const T end_;
  const IncrementT step_;
  // The index for the end() iterator.  All the elements in the generated
  // sequence are indexed (0-based) to aid iterator comparison.
  const int end_index_;
};  // class RangeGenerator
+
+
+// Generates values from a pair of STL-style iterators. Used in the
+// ValuesIn() function. The elements are copied from the source range
+// since the source can be located on the stack, and the generator
+// is likely to persist beyond that stack frame.
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ template <typename ForwardIterator>
+ ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+ : container_(begin, end) {}
+ virtual ~ValuesInIteratorRangeGenerator() {}
+
+ virtual ParamIteratorInterface<T>* Begin() const {
+ return new Iterator(this, container_.begin());
+ }
+ virtual ParamIteratorInterface<T>* End() const {
+ return new Iterator(this, container_.end());
+ }
+
+ private:
+ typedef typename ::std::vector<T> ContainerType;
+
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base,
+ typename ContainerType::const_iterator iterator)
+ : base_(base), iterator_(iterator) {}
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+ return base_;
+ }
+ virtual void Advance() {
+ ++iterator_;
+ value_.reset();
+ }
+ virtual ParamIteratorInterface<T>* Clone() const {
+ return new Iterator(*this);
+ }
+ // We need to use cached value referenced by iterator_ because *iterator_
+ // can return a temporary object (and of type other then T), so just
+ // having "return &*iterator_;" doesn't work.
+ // value_ is updated here and not in Advance() because Advance()
+ // can advance iterator_ beyond the end of the range, and we cannot
+ // detect that fact. The client code, on the other hand, is
+ // responsible for not calling Current() on an out-of-range iterator.
+ virtual const T* Current() const {
+ if (value_.get() == NULL)
+ value_.reset(new T(*iterator_));
+ return value_.get();
+ }
+ virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ return iterator_ ==
+ CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ // The explicit constructor call suppresses a false warning
+ // emitted by gcc when supplied with the -Wextra option.
+ : ParamIteratorInterface<T>(),
+ base_(other.base_),
+ iterator_(other.iterator_) {}
+
+ const ParamGeneratorInterface<T>* const base_;
+ typename ContainerType::const_iterator iterator_;
+ // A cached value of *iterator_. We keep it here to allow access by
+ // pointer in the wrapping iterator's operator->().
+ // value_ needs to be mutable to be accessed in Current().
+ // Use of scoped_ptr helps manage cached value's lifetime,
+ // which is bound by the lifespan of the iterator itself.
+ mutable scoped_ptr<const T> value_;
+ }; // class ValuesInIteratorRangeGenerator::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const ValuesInIteratorRangeGenerator& other);
+
+ const ContainerType container_;
+}; // class ValuesInIteratorRangeGenerator
+
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// Stores a parameter value and later creates tests parameterized with that
// value.
template <class TestClass>
class ParameterizedTestFactory : public TestFactoryBase {
 public:
  typedef typename TestClass::ParamType ParamType;
  explicit ParameterizedTestFactory(ParamType parameter) :
      parameter_(parameter) {}
  virtual Test* CreateTest() {
    // Publish the parameter to the fixture class before constructing
    // the test, so the test's constructor can already read it.
    TestClass::SetParam(&parameter_);
    return new TestClass();
  }

 private:
  const ParamType parameter_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
};
+
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// TestMetaFactoryBase is a base class for meta-factories that create
// test factories for passing into MakeAndRegisterTestInfo function.
template <class ParamType>
class TestMetaFactoryBase {
 public:
  virtual ~TestMetaFactoryBase() {}

  // Returns a new factory that will create tests bound to 'parameter'.
  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
};
+
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// TestMetaFactory creates test factories for passing into
// MakeAndRegisterTestInfo function.  Since MakeAndRegisterTestInfo receives
// ownership of test factory pointer, same factory object cannot be passed
// into that method twice.  But ParameterizedTestCaseInfo is going to call
// it for each Test/Parameter value combination.  Thus it needs meta factory
// creator class.
template <class TestCase>
class TestMetaFactory
    : public TestMetaFactoryBase<typename TestCase::ParamType> {
 public:
  typedef typename TestCase::ParamType ParamType;

  TestMetaFactory() {}

  // Each call returns a fresh factory, so ownership can be handed to
  // MakeAndRegisterTestInfo once per parameter value.
  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
    return new ParameterizedTestFactory<TestCase>(parameter);
  }

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
};
+
// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
//
// ParameterizedTestCaseInfoBase is a generic interface
// to ParameterizedTestCaseInfo classes.  ParameterizedTestCaseInfoBase
// accumulates test information provided by TEST_P macro invocations
// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
// and uses that information to register all resulting test instances
// in RegisterTests method.  The ParameterizeTestCaseRegistry class holds
// a collection of pointers to the ParameterizedTestCaseInfo objects
// and calls RegisterTests() on each of them when asked.
class ParameterizedTestCaseInfoBase {
 public:
  virtual ~ParameterizedTestCaseInfoBase() {}

  // Base part of test case name for display purposes.
  virtual const string& GetTestCaseName() const = 0;
  // Test case id to verify identity.
  virtual TypeId GetTestCaseTypeId() const = 0;
  // UnitTest class invokes this method to register tests in this
  // test case right before running them in RUN_ALL_TESTS macro.
  // This method should not be called more than once on any single
  // instance of a ParameterizedTestCaseInfoBase derived class.
  virtual void RegisterTests() = 0;

 protected:
  ParameterizedTestCaseInfoBase() {}

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test case and generators
+// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
+// test case. It registers tests with all values generated by all
+// generators when asked.
+template <class TestCase>
+class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
+ public:
+ // ParamType and GeneratorCreationFunc are private types but are required
+ // for declarations of public methods AddTestPattern() and
+ // AddTestCaseInstantiation().
+ typedef typename TestCase::ParamType ParamType;
+ // A function that returns an instance of appropriate generator type.
+ typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+
+ explicit ParameterizedTestCaseInfo(const char* name)
+ : test_case_name_(name) {}
+
+ // Test case base name for display purposes.
+ virtual const string& GetTestCaseName() const { return test_case_name_; }
+ // Test case id to verify identity.
+ virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+ // TEST_P macro uses AddTestPattern() to record information
+ // about a single test in a LocalTestInfo structure.
+ // test_case_name is the base name of the test case (without invocation
+ // prefix). test_base_name is the name of an individual test without
+ // parameter index. For the test SequenceA/FooTest.DoBar/1 FooTest is
+ // test case base name and DoBar is test base name.
+ void AddTestPattern(const char* test_case_name,
+ const char* test_base_name,
+ TestMetaFactoryBase<ParamType>* meta_factory) {
+ tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
+ test_base_name,
+ meta_factory)));
+ }
+ // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information
+ // about a generator.
+ int AddTestCaseInstantiation(const string& instantiation_name,
+ GeneratorCreationFunc* func,
+ const char* /* file */,
+ int /* line */) {
+ instantiations_.push_back(::std::make_pair(instantiation_name, func));
+ return 0; // Return value used only to run this method in namespace scope.
+ }
+ // UnitTest class invokes this method to register tests in this test case
+ // right before running tests in RUN_ALL_TESTS macro.
+ // This method should not be called more than once on any single
+ // instance of a ParameterizedTestCaseInfoBase derived class.
+ // UnitTest has a guard to prevent from calling this method more than once.
+ virtual void RegisterTests() {
+ for (typename TestInfoContainer::iterator test_it = tests_.begin();
+ test_it != tests_.end(); ++test_it) {
+ linked_ptr<TestInfo> test_info = *test_it;
+ for (typename InstantiationContainer::iterator gen_it =
+ instantiations_.begin(); gen_it != instantiations_.end();
+ ++gen_it) {
+ const string& instantiation_name = gen_it->first;
+ ParamGenerator<ParamType> generator((*gen_it->second)());
+
+ string test_case_name;
+ if ( !instantiation_name.empty() )
+ test_case_name = instantiation_name + "/";
+ test_case_name += test_info->test_case_base_name;
+
+ int i = 0;
+ for (typename ParamGenerator<ParamType>::iterator param_it =
+ generator.begin();
+ param_it != generator.end(); ++param_it, ++i) {
+ Message test_name_stream;
+ test_name_stream << test_info->test_base_name << "/" << i;
+ MakeAndRegisterTestInfo(
+ test_case_name.c_str(),
+ test_name_stream.GetString().c_str(),
+ NULL, // No type parameter.
+ PrintToString(*param_it).c_str(),
+ GetTestCaseTypeId(),
+ TestCase::SetUpTestCase,
+ TestCase::TearDownTestCase,
+ test_info->test_meta_factory->CreateTestFactory(*param_it));
+ } // for param_it
+ } // for gen_it
+ } // for test_it
+ } // RegisterTests
+
+ private:
+ // LocalTestInfo structure keeps information about a single test registered
+ // with TEST_P macro.
+ struct TestInfo {
+ TestInfo(const char* a_test_case_base_name,
+ const char* a_test_base_name,
+ TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+ test_case_base_name(a_test_case_base_name),
+ test_base_name(a_test_base_name),
+ test_meta_factory(a_test_meta_factory) {}
+
+ const string test_case_base_name;
+ const string test_base_name;
+ const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+ };
+ typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+ // Keeps pairs of <Instantiation name, Sequence generator creation function>
+ // received from INSTANTIATE_TEST_CASE_P macros.
+ typedef ::std::vector<std::pair<string, GeneratorCreationFunc*> >
+ InstantiationContainer;
+
+ const string test_case_name_;
+ TestInfoContainer tests_;
+ InstantiationContainer instantiations_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+}; // class ParameterizedTestCaseInfo
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
+// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
+// macros use it to locate their corresponding ParameterizedTestCaseInfo
+// descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+ ParameterizedTestCaseRegistry() {}
+ ~ParameterizedTestCaseRegistry() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ delete *it;
+ }
+ }
+
+ // Looks up or creates and returns a structure containing information about
+ // tests and instantiations of a particular test case.
+ template <class TestCase>
+ ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+ const char* test_case_name,
+ const char* file,
+ int line) {
+ ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ if ((*it)->GetTestCaseName() == test_case_name) {
+ if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+ // Complain about incorrect usage of Google Test facilities
+ // and terminate the program since we cannot guarantee correct
+ // test case setup and tear-down in this case.
+ ReportInvalidTestCaseType(test_case_name, file, line);
+ posix::Abort();
+ } else {
+ // At this point we are sure that the object we found is of the same
+ // type we are looking for, so we downcast it to that type
+ // without further checks.
+ typed_test_info = CheckedDowncastToActualType<
+ ParameterizedTestCaseInfo<TestCase> >(*it);
+ }
+ break;
+ }
+ }
+ if (typed_test_info == NULL) {
+ typed_test_info = new ParameterizedTestCaseInfo<TestCase>(test_case_name);
+ test_case_infos_.push_back(typed_test_info);
+ }
+ return typed_test_info;
+ }
+ void RegisterTests() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ (*it)->RegisterTests();
+ }
+ }
+
+ private:
+ typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+ TestCaseInfoContainer test_case_infos_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+// This file was GENERATED by command:
+// pump.py gtest-param-util-generated.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most 50 arguments in Values,
+// and at most 10 arguments in Combine. Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tr1::tuple which is
+// currently set at 10.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container);
+
+namespace internal {
+
+// Used in the Values() function to provide polymorphic capabilities.
+template <typename T1>
+class ValueArray1 {
+ public:
+ explicit ValueArray1(T1 v1) : v1_(v1) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const { return ValuesIn(&v1_, &v1_ + 1); }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray1& other);
+
+ const T1 v1_;
+};
+
+template <typename T1, typename T2>
+class ValueArray2 {
+ public:
+ ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray2& other);
+
+ const T1 v1_;
+ const T2 v2_;
+};
+
+template <typename T1, typename T2, typename T3>
+class ValueArray3 {
+ public:
+ ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray3& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+class ValueArray4 {
+ public:
+ ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray4& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class ValueArray5 {
+ public:
+ ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray5& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class ValueArray6 {
+ public:
+ ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray6& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class ValueArray7 {
+ public:
+ ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray7& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class ValueArray8 {
+ public:
+ ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray8& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class ValueArray9 {
+ public:
+ ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray9& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class ValueArray10 {
+ public:
+ ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray10& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+class ValueArray11 {
+ public:
+ ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray11& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+class ValueArray12 {
+ public:
+ ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray12& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+class ValueArray13 {
+ public:
+ ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray13& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+class ValueArray14 {
+ public:
+ ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray14& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+class ValueArray15 {
+ public:
+ ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray15& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+class ValueArray16 {
+ public:
+ ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray16& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+class ValueArray17 {
+ public:
+ ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray17& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+class ValueArray18 {
+ public:
+ ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray18& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+class ValueArray19 {
+ public:
+ ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray19& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+class ValueArray20 {
+ public:
+ ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray20& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+};
+
+// Immutable bundle of 21 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 21 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+class ValueArray21 {
+ public:
+ ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray21& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+};
+
+// Immutable bundle of 22 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 22 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+class ValueArray22 {
+ public:
+ ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray22& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+};
+
+// Immutable bundle of 23 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 23 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+class ValueArray23 {
+ public:
+ ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray23& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+};
+
+// Immutable bundle of 24 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 24 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+class ValueArray24 {
+ public:
+ ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray24& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+};
+
+// Immutable bundle of 25 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 25 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+class ValueArray25 {
+ public:
+ ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray25& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+};
+
+// Immutable bundle of 26 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 26 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+class ValueArray26 {
+ public:
+ ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray26& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+};
+
+// Immutable bundle of 27 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 27 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+class ValueArray27 {
+ public:
+ ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray27& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+};
+
+// Immutable bundle of 28 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 28 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+class ValueArray28 {
+ public:
+ ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray28& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+};
+
+// Immutable bundle of 29 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 29 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+class ValueArray29 {
+ public:
+ ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray29& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+};
+
+// Immutable bundle of 30 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 30 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+class ValueArray30 {
+ public:
+ ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray30& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+};
+
+// Immutable bundle of 31 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 31 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+class ValueArray31 {
+ public:
+ ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray31& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+};
+
+// Immutable bundle of 32 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 32 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+class ValueArray32 {
+ public:
+ ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray32& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+};
+
+// Immutable bundle of 33 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 33 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+class ValueArray33 {
+ public:
+ ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray33& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+};
+
+// Immutable bundle of 34 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 34 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+class ValueArray34 {
+ public:
+ ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray34& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+};
+
+// Immutable bundle of 35 values (each with its own type parameter). The
+// templated conversion operator static_casts every stored value to T and
+// passes the resulting array to ValuesIn(), yielding a ParamGenerator<T>
+// over those 35 values. Generated gtest code -- do not edit by hand.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+class ValueArray35 {
+ public:
+ ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+ v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray35& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+};
+
+// 36-value holder for testing::Values(); converts to ParamGenerator<T> by
+// static_cast-ing each value to T and forwarding the array to ValuesIn().
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36>
+class ValueArray36 {
+ public:
+  ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray36& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+};
+
+// 37-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37>
+class ValueArray37 {
+ public:
+  ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+      v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+      v36_(v36), v37_(v37) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray37& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+};
+
+// 38-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38>
+class ValueArray38 {
+ public:
+  ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+      v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray38& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+};
+
+// 39-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39>
+class ValueArray39 {
+ public:
+  ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+      v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+      v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+      v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray39& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+};
+
+// 40-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40>
+class ValueArray40 {
+ public:
+  ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+      v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+      v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+      v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+      v40_(v40) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray40& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+};
+
+// 41-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41>
+class ValueArray41 {
+ public:
+  ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+      T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+      v39_(v39), v40_(v40), v41_(v41) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray41& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+};
+
+// 42-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42>
+class ValueArray42 {
+ public:
+  ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+      v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+      v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+      v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray42& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+};
+
+// 43-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43>
+class ValueArray43 {
+ public:
+  ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+      v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+      v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),
+      v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray43& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+};
+
+// 44-value holder for testing::Values(); same pattern as ValueArray36.
+// Generated code (gtest pump template) -- keep in sync with siblings.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29, typename T30,
+    typename T31, typename T32, typename T33, typename T34, typename T35,
+    typename T36, typename T37, typename T38, typename T39, typename T40,
+    typename T41, typename T42, typename T43, typename T44>
+class ValueArray44 {
+ public:
+  ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+      T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+      T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+      v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+      v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),
+      v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),
+      v43_(v43), v44_(v44) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray44& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+class ValueArray45 {
+ public:
+ ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+ v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+ v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),
+ v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray45& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+class ValueArray46 {
+ public:
+ ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray46& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+class ValueArray47 {
+ public:
+ ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),
+ v47_(v47) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray47& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+class ValueArray48 {
+ public:
+ ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+ v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+ v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),
+ v46_(v46), v47_(v47), v48_(v48) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+ static_cast<T>(v48_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray48& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+class ValueArray49 {
+ public:
+ ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,
+ T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+ static_cast<T>(v48_), static_cast<T>(v49_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray49& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+class ValueArray50 {
+ public:
+ ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,
+ T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+ static_cast<T>(v48_), static_cast<T>(v49_), static_cast<T>(v50_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray50& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+ const T50 v50_;
+};
+
+# if GTEST_HAS_COMBINE
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Generates values from the Cartesian product of values produced
+// by the argument generators.
+//
+template <typename T1, typename T2>
+class CartesianProductGenerator2
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2> ParamType;
+
+ CartesianProductGenerator2(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2)
+ : g1_(g1), g2_(g2) {}
+ virtual ~CartesianProductGenerator2() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current2_;
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator2::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator2& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+}; // class CartesianProductGenerator2
+
+
+template <typename T1, typename T2, typename T3>
+class CartesianProductGenerator3
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3> ParamType;
+
+ CartesianProductGenerator3(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ virtual ~CartesianProductGenerator3() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current3_;
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator3::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator3& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+}; // class CartesianProductGenerator3
+
+
+// Generates the Cartesian product of the value sequences produced by four
+// underlying generators, yielding each combination as a
+// ::std::tr1::tuple<T1, T2, T3, T4>. The last generator (g4_) varies
+// fastest, like the low digit of an odometer.
+template <typename T1, typename T2, typename T3, typename T4>
+class CartesianProductGenerator4
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4> ParamType;
+
+ CartesianProductGenerator4(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ virtual ~CartesianProductGenerator4() {}
+
+ // Returns an iterator positioned at the first combination (all component
+ // iterators at begin()).
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin());
+ }
+ // Returns a past-the-end iterator (all component iterators at end()).
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ // Odometer-style increment: bump the last component; whenever a
+ // component wraps past its end, reset it to begin and carry into the
+ // previous one. The chained (non-else) ifs are safe because a given
+ // component can only reach its end_ via the carry directly above it.
+ ++current4_;
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_);
+ }
+
+ private:
+ // Copy constructor used by Clone(). The cached current_value_ is
+ // recomputed rather than copied.
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_) {
+ ComputeCurrentValue();
+ }
+
+ // Caches the tuple returned by Current(). Skipped at end of range,
+ // where dereferencing the component iterators would be invalid.
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator4::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator4& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+}; // class CartesianProductGenerator4
+
+
+// Generates the Cartesian product of the value sequences produced by five
+// underlying generators, yielding each combination as a
+// ::std::tr1::tuple<T1, ..., T5>. The last generator (g5_) varies fastest,
+// like the low digit of an odometer.
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class CartesianProductGenerator5
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5> ParamType;
+
+ CartesianProductGenerator5(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ virtual ~CartesianProductGenerator5() {}
+
+ // Returns an iterator positioned at the first combination (all component
+ // iterators at begin()).
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());
+ }
+ // Returns a past-the-end iterator (all component iterators at end()).
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ // Odometer-style increment: bump the last component; whenever a
+ // component wraps past its end, reset it to begin and carry into the
+ // previous one. The chained (non-else) ifs are safe because a given
+ // component can only reach its end_ via the carry directly above it.
+ ++current5_;
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_);
+ }
+
+ private:
+ // Copy constructor used by Clone(). The cached current_value_ is
+ // recomputed rather than copied.
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_) {
+ ComputeCurrentValue();
+ }
+
+ // Caches the tuple returned by Current(). Skipped at end of range,
+ // where dereferencing the component iterators would be invalid.
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator5::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator5& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+}; // class CartesianProductGenerator5
+
+
+// Generates the Cartesian product of the value sequences produced by six
+// underlying generators, yielding each combination as a
+// ::std::tr1::tuple<T1, ..., T6>. The last generator (g6_) varies fastest,
+// like the low digit of an odometer.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class CartesianProductGenerator6
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5,
+ T6> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> ParamType;
+
+ CartesianProductGenerator6(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ virtual ~CartesianProductGenerator6() {}
+
+ // Returns an iterator positioned at the first combination (all component
+ // iterators at begin()).
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());
+ }
+ // Returns a past-the-end iterator (all component iterators at end()).
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ // Odometer-style increment: bump the last component; whenever a
+ // component wraps past its end, reset it to begin and carry into the
+ // previous one. The chained (non-else) ifs are safe because a given
+ // component can only reach its end_ via the carry directly above it.
+ ++current6_;
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_);
+ }
+
+ private:
+ // Copy constructor used by Clone(). The cached current_value_ is
+ // recomputed rather than copied.
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_) {
+ ComputeCurrentValue();
+ }
+
+ // Caches the tuple returned by Current(). Skipped at end of range,
+ // where dereferencing the component iterators would be invalid.
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator6::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator6& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+}; // class CartesianProductGenerator6
+
+
+// Generates the Cartesian product of the value sequences produced by seven
+// underlying generators, yielding each combination as a
+// ::std::tr1::tuple<T1, ..., T7>. The last generator (g7_) varies fastest,
+// like the low digit of an odometer.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class CartesianProductGenerator7
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;
+
+ CartesianProductGenerator7(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ virtual ~CartesianProductGenerator7() {}
+
+ // Returns an iterator positioned at the first combination (all component
+ // iterators at begin()).
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin());
+ }
+ // Returns a past-the-end iterator (all component iterators at end()).
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ // Odometer-style increment: bump the last component; whenever a
+ // component wraps past its end, reset it to begin and carry into the
+ // previous one. The chained (non-else) ifs are safe because a given
+ // component can only reach its end_ via the carry directly above it.
+ ++current7_;
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_);
+ }
+
+ private:
+ // Copy constructor used by Clone(). The cached current_value_ is
+ // recomputed rather than copied.
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_) {
+ ComputeCurrentValue();
+ }
+
+ // Caches the tuple returned by Current(). Skipped at end of range,
+ // where dereferencing the component iterators would be invalid.
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator7::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator7& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+}; // class CartesianProductGenerator7
+
+
+// Generates the Cartesian product of the value sequences produced by eight
+// underlying generators, yielding each combination as a
+// ::std::tr1::tuple<T1, ..., T8>. The last generator (g8_) varies fastest,
+// like the low digit of an odometer.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class CartesianProductGenerator8
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;
+
+ CartesianProductGenerator8(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ virtual ~CartesianProductGenerator8() {}
+
+ // Returns an iterator positioned at the first combination (all component
+ // iterators at begin()).
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin());
+ }
+ // Returns a past-the-end iterator (all component iterators at end()).
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ // Odometer-style increment: bump the last component; whenever a
+ // component wraps past its end, reset it to begin and carry into the
+ // previous one. The chained (non-else) ifs are safe because a given
+ // component can only reach its end_ via the carry directly above it.
+ ++current8_;
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_);
+ }
+
+ private:
+ // Copy constructor used by Clone(). The cached current_value_ is
+ // recomputed rather than copied.
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_) {
+ ComputeCurrentValue();
+ }
+
+ // Caches the tuple returned by Current(). Skipped at end of range,
+ // where dereferencing the component iterators would be invalid.
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator8::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator8& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+}; // class CartesianProductGenerator8
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class CartesianProductGenerator9
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;
+
+ CartesianProductGenerator9(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ virtual ~CartesianProductGenerator9() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current9_;
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator9::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator9& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+}; // class CartesianProductGenerator9
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class CartesianProductGenerator10
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9, T10> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;
+
+ CartesianProductGenerator10(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,
+ const ParamGenerator<T10>& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ virtual ~CartesianProductGenerator10() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end(), g10_, g10_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9,
+ const ParamGenerator<T10>& g10,
+ const typename ParamGenerator<T10>::iterator& current10)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9),
+ begin10_(g10.begin()), end10_(g10.end()), current10_(current10) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance should not be called on beyond-of-range iterators
+ // so no component iterators must be beyond end of range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current10_;
+ if (current10_ == end10_) {
+ current10_ = begin10_;
+ ++current9_;
+ }
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_ &&
+ current10_ == typed_other->current10_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_),
+ begin10_(other.begin10_),
+ end10_(other.end10_),
+ current10_(other.current10_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_, *current10_);
+ }
+ bool AtEnd() const {
+ // We must report iterator past the end of the range when either of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_ ||
+ current10_ == end10_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ const typename ParamGenerator<T10>::iterator begin10_;
+ const typename ParamGenerator<T10>::iterator end10_;
+ typename ParamGenerator<T10>::iterator current10_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator10::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator10& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+ const ParamGenerator<T10> g10_;
+}; // class CartesianProductGenerator10
+
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Helper classes providing Combine() with polymorphic features. They allow
+// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
+// convertible to U.
+//
+template <class Generator1, class Generator2>
+class CartesianProductHolder2 {
+ public:
+CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
+ : g1_(g1), g2_(g2) {}
+ template <typename T1, typename T2>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2> >(
+ new CartesianProductGenerator2<T1, T2>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder2& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+}; // class CartesianProductHolder2
+
+template <class Generator1, class Generator2, class Generator3>
+class CartesianProductHolder3 {
+ public:
+CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ template <typename T1, typename T2, typename T3>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >(
+ new CartesianProductGenerator3<T1, T2, T3>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder3& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+}; // class CartesianProductHolder3
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4>
+class CartesianProductHolder4 {
+ public:
+CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ template <typename T1, typename T2, typename T3, typename T4>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >(
+ new CartesianProductGenerator4<T1, T2, T3, T4>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder4& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+}; // class CartesianProductHolder4
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5>
+class CartesianProductHolder5 {
+ public:
+CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >(
+ new CartesianProductGenerator5<T1, T2, T3, T4, T5>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder5& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+}; // class CartesianProductHolder5
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6>
+class CartesianProductHolder6 {
+ public:
+CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >(
+ new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder6& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+}; // class CartesianProductHolder6
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7>
+class CartesianProductHolder7 {
+ public:
+CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> >(
+ new CartesianProductGenerator7<T1, T2, T3, T4, T5, T6, T7>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder7& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+}; // class CartesianProductHolder7
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8>
+class CartesianProductHolder8 {
+ public:
+CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7,
+ T8> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(
+ new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder8& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+}; // class CartesianProductHolder8
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9>
+class CartesianProductHolder9 {
+ public:
+CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >(
+ new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder9& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+}; // class CartesianProductHolder9
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9, class Generator10>
+class CartesianProductHolder10 {
+ public:
+CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9, const Generator10& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9, T10> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9, T10> >(
+ new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_),
+ static_cast<ParamGenerator<T10> >(g10_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder10& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+ const Generator10 g10_;
+}; // class CartesianProductHolder10
+
+# endif // GTEST_HAS_COMBINE
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, tests from test case FooTest are instantiated
+// each three times with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+// - returns a generator producing a sequence of values {start, start+1,
+// start+2, ..., }.
+// Range(start, end, step)
+// - returns a generator producing a sequence of values {start, start+step,
+// start+step+step, ..., }.
+// Notes:
+// * The generated sequences never include end. For example, Range(1, 5)
+// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+// returns a generator producing {1, 3, 5, 7}.
+// * start and end must have the same type. That type may be any integral or
+// floating-point type or a user defined type satisfying these conditions:
+// * It must be assignable (have operator=() defined).
+// * It must have operator+() (operator+(int-compatible type) for
+// two-operand version).
+// * It must have operator<() defined.
+// Elements in the resulting sequences will also have that type.
+// * Condition start < end must be satisfied in order for resulting sequences
+// to contain any elements.
+//
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+ return internal::ParamGenerator<T>(
+ new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+ return Range(start, end, 1);
+}
+
+// ValuesIn() function allows generation of tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+// - returns a generator producing sequences with elements from
+// a C-style array.
+// ValuesIn(const Container& container)
+// - returns a generator producing sequences with elements from
+// an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+// - returns a generator producing sequences with elements from
+// a range [begin, end) defined by a pair of STL-style iterators. These
+// iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, SrtingTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+// ::std::vector< ::std::string> v;
+// v.push_back("a");
+// v.push_back("b");
+// return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+// StlStringTest,
+// ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+// ::std::list<char> list;
+// list.push_back('a');
+// list.push_back('b');
+// return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+// CharTest,
+// ValuesIn(l.begin(), l.end()));
+//
+// Builds a generator over the iterator range [begin, end); the element type
+// is deduced through IteratorTraits, so plain C pointers also work.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+ typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+ ::value_type ParamType;
+ return internal::ParamGenerator<ParamType>(
+ new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+// C-style array overload: delegates to the iterator-range form over
+// [array, array + N).
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+ return ValuesIn(array, array + N);
+}
+
+// STL-style container overload: delegates to the iterator-range form over
+// [container.begin(), container.end()).
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container) {
+ return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+// - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
+//
+// Values() overloads for 1-5 explicitly listed parameters. Each forwards its
+// arguments into the matching internal::ValueArrayN (pump-generated; code
+// kept byte-identical to upstream googletest).
+template <typename T1>
+internal::ValueArray1<T1> Values(T1 v1) {
+ return internal::ValueArray1<T1>(v1);
+}
+
+template <typename T1, typename T2>
+internal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {
+ return internal::ValueArray2<T1, T2>(v1, v2);
+}
+
+template <typename T1, typename T2, typename T3>
+internal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {
+ return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+internal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {
+ return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+internal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5) {
+ return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
+}
+
+// Values() overloads for 6-14 explicitly listed parameters (pump-generated;
+// code kept byte-identical to upstream googletest).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+internal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6) {
+ return internal::ValueArray6<T1, T2, T3, T4, T5, T6>(v1, v2, v3, v4, v5, v6);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7) {
+ return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,
+ v6, v7);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {
+ return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,
+ v5, v6, v7, v8);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {
+ return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {
+ return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) {
+ return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) {
+ return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) {
+ return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {
+ return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14);
+}
+
+// Values() overloads for 15-21 explicitly listed parameters (pump-generated;
+// code kept byte-identical to upstream googletest).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {
+ return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16) {
+ return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17) {
+ return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18) {
+ return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {
+ return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {
+ return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {
+ return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);
+}
+
+// Values() overloads for 22-28 explicitly listed parameters (pump-generated;
+// code kept byte-identical to upstream googletest).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22) {
+ return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23) {
+ return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24) {
+ return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {
+ return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) {
+ return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) {
+ return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) {
+ return internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28);
+}
+
+// Values() overloads for 29-35 explicitly listed parameters (pump-generated;
+// code kept byte-identical to upstream googletest).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) {
+ return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {
+ return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {
+ return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32) {
+ return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33) {
+ return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34) {
+ return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {
+ return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);
+}
+
+// Values() overloads for 36-39 explicitly listed parameters (pump-generated;
+// code kept byte-identical to upstream googletest).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {
+ return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37) {
+ return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38) {
+ return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,
+ v33, v34, v35, v36, v37, v38);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38, T39 v39) {
+ return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,
+ v32, v33, v34, v35, v36, v37, v38, v39);
+}
+
+// Values() overload for 40 arguments: packs copies of v1..v40 into an
+// internal::ValueArray40 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,
+ T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,
+ T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {
+ return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
+ v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);
+}
+
+// Values() overload for 41 arguments: packs copies of v1..v41 into an
+// internal::ValueArray41 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {
+ return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,
+ v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);
+}
+
+// Values() overload for 42 arguments: packs copies of v1..v42 into an
+// internal::ValueArray42 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42) {
+ return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,
+ v42);
+}
+
+// Values() overload for 43 arguments: packs copies of v1..v43 into an
+// internal::ValueArray43 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43) {
+ return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,
+ v41, v42, v43);
+}
+
+// Values() overload for 44 arguments: packs copies of v1..v44 into an
+// internal::ValueArray44 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44) {
+ return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,
+ v40, v41, v42, v43, v44);
+}
+
+// Values() overload for 45 arguments: packs copies of v1..v45 into an
+// internal::ValueArray45 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+ T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {
+ return internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,
+ v39, v40, v41, v42, v43, v44, v45);
+}
+
+// Values() overload for 46 arguments: packs copies of v1..v46 into an
+// internal::ValueArray46 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {
+ return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46);
+}
+
+// Values() overload for 47 arguments: packs copies of v1..v47 into an
+// internal::ValueArray47 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {
+ return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);
+}
+
+// Values() overload for 48 arguments: packs copies of v1..v48 into an
+// internal::ValueArray48 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,
+ T48 v48) {
+ return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,
+ v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);
+}
+
+// Values() overload for 49 arguments: packs copies of v1..v49 into an
+// internal::ValueArray49 (pump-generated code; one overload per arity).
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,
+ T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,
+ T47 v47, T48 v48, T49 v49) {
+ return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,
+ v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);
+}
+
+// Values() overload for 50 arguments (the generated maximum): packs copies
+// of v1..v50 into an internal::ValueArray50.
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,
+ T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,
+ T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {
+ return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,
+ v48, v49, v50);
+}
+
+// Bool() allows generating tests with parameters in a set of (false, true).
+//
+// Synopsis:
+// Bool()
+// - returns a generator producing sequences with elements {false, true}.
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// Combine() function.
+//
+// In the following example all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+// virtual void SetUp() {
+// external_flag = GetParam();
+// }
+// }
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+// Implemented in terms of the two-argument Values() overload above.
+inline internal::ParamGenerator<bool> Bool() {
+ return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+// - returns a generator producing sequences with elements coming from
+// the Cartesian product of elements from the sequences generated by
+// gen1, gen2, ..., genN. The sequence elements will have a type of
+// tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+// of elements from sequences produces by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate tests in test case AnimalTest each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+// : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+// Combine(Values("cat", "dog"),
+// Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+// : public testing::TestWithParam<tuple<bool, bool> > {
+// virtual void SetUp() {
+// // Assigns external_flag_1 and external_flag_2 values from the tuple.
+// tie(external_flag_1, external_flag_2) = GetParam();
+// }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+// // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
+// Combine(Bool(), Bool()));
+//
+// Combine() overload for 2 generators: wraps them in an
+// internal::CartesianProductHolder2 (one overload per arity, up to 10).
+template <typename Generator1, typename Generator2>
+internal::CartesianProductHolder2<Generator1, Generator2> Combine(
+ const Generator1& g1, const Generator2& g2) {
+ return internal::CartesianProductHolder2<Generator1, Generator2>(
+ g1, g2);
+}
+
+// Combine() overload for 3 generators: wraps them in an
+// internal::CartesianProductHolder3.
+template <typename Generator1, typename Generator2, typename Generator3>
+internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3) {
+ return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
+ g1, g2, g3);
+}
+
+// Combine() overload for 4 generators: wraps them in an
+// internal::CartesianProductHolder4.
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4>
+internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4) {
+ return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4>(
+ g1, g2, g3, g4);
+}
+
+// Combine() overload for 5 generators: wraps them in an
+// internal::CartesianProductHolder5.
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5>
+internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5) {
+ return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5>(
+ g1, g2, g3, g4, g5);
+}
+
+// Combine() overload for 6 generators: wraps them in an
+// internal::CartesianProductHolder6.
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6>
+internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6) {
+ return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6>(
+ g1, g2, g3, g4, g5, g6);
+}
+
+// Combine() overload for 7 generators: wraps them in an
+// internal::CartesianProductHolder7.
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7>
+internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7) {
+ return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7>(
+ g1, g2, g3, g4, g5, g6, g7);
+}
+
+// Combine() overload for 8 generators: wraps them in an
+// internal::CartesianProductHolder8.
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8>
+internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8) {
+ return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8>(
+ g1, g2, g3, g4, g5, g6, g7, g8);
+}
+
+// Combine() overload for 9 generators: wraps them in an
+// internal::CartesianProductHolder9.
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9>
+internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8,
+ Generator9> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9) {
+ return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9);
+}
+
+// Combine() overload for 10 generators (the documented maximum): wraps them
+// in an internal::CartesianProductHolder10.
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9,
+ typename Generator10>
+internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9,
+ const Generator10& g10) {
+ return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
+}
+# endif // GTEST_HAS_COMBINE
+
+
+
+// TEST_P(test_case_name, test_name) defines a value-parameterized test:
+// it derives a class from the user's fixture `test_case_name`, registers
+// the test pattern with UnitTest's parameterized-test registry through the
+// static AddToRegistry()/gtest_registering_dummy_ initializer, and leaves
+// TestBody() for the braces following the macro invocation to define.
+# define TEST_P(test_case_name, test_name) \
+ class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ : public test_case_name { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+ virtual void TestBody(); \
+ private: \
+ static int AddToRegistry() { \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
+ #test_case_name, \
+ #test_name, \
+ new ::testing::internal::TestMetaFactory< \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_case_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+// INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) binds a
+// parameter generator to a test case: it defines a function that evaluates
+// `generator` and registers the instantiation under `prefix` with the
+// parameterized-test registry via a dummy int whose initializer runs during
+// static initialization.
+# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
+ ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+ gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+ int gtest_##prefix##test_case_name##_dummy_ = \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\
+ #prefix, \
+ &gtest_##prefix##test_case_name##_EvalGenerator_, \
+ __FILE__, __LINE__)
+
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+// private:
+// void MyMethod();
+// FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+// // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+// // Can call MyClass::MyMethod() here.
+// }
+
+// Generates a friend declaration for the class that the TEST()/TEST_F()
+// macros produce (named <test_case_name>_<test_name>_Test), giving that
+// test access to private/protected members of the befriending class.
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+ // The possible outcomes of a test part (i.e. an assertion or an
+ // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+ enum Type {
+ kSuccess, // Succeeded.
+ kNonFatalFailure, // Failed but the test can continue.
+ kFatalFailure // Failed and the test should be terminated.
+ };
+
+ // C'tor. TestPartResult does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestPartResult object.
+ // A NULL file name is normalized to "" here; file_name() performs the
+ // reverse mapping ("" back to NULL).
+ TestPartResult(Type a_type,
+ const char* a_file_name,
+ int a_line_number,
+ const char* a_message)
+ : type_(a_type),
+ file_name_(a_file_name == NULL ? "" : a_file_name),
+ line_number_(a_line_number),
+ summary_(ExtractSummary(a_message)),
+ message_(a_message) {
+ }
+
+ // Gets the outcome of the test part.
+ Type type() const { return type_; }
+
+ // Gets the name of the source file where the test part took place, or
+ // NULL if it's unknown.
+ const char* file_name() const {
+ return file_name_.empty() ? NULL : file_name_.c_str();
+ }
+
+ // Gets the line in the source file where the test part took place,
+ // or -1 if it's unknown.
+ int line_number() const { return line_number_; }
+
+ // Gets the summary of the failure message.
+ const char* summary() const { return summary_.c_str(); }
+
+ // Gets the message associated with the test part.
+ const char* message() const { return message_.c_str(); }
+
+ // Returns true iff the test part passed.
+ bool passed() const { return type_ == kSuccess; }
+
+ // Returns true iff the test part failed.
+ bool failed() const { return type_ != kSuccess; }
+
+ // Returns true iff the test part non-fatally failed.
+ bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
+
+ // Returns true iff the test part fatally failed.
+ bool fatally_failed() const { return type_ == kFatalFailure; }
+
+ private:
+ Type type_;
+
+ // Gets the summary of the failure message by omitting the stack
+ // trace in it.
+ static std::string ExtractSummary(const char* message);
+
+ // The name of the source file where the test part took place, or
+ // "" if the source file is unknown.
+ std::string file_name_;
+ // The line in the source file where the test part took place, or -1
+ // if the line number is unknown.
+ int line_number_;
+ std::string summary_; // The test failure summary.
+ std::string message_; // The test failure message.
+};
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
+
+// An array of TestPartResult objects.
+//
+// Don't inherit from TestPartResultArray as its destructor is not
+// virtual.
+class GTEST_API_ TestPartResultArray {
+ public:
+ TestPartResultArray() {}
+
+ // Appends the given TestPartResult to the array.
+ // The result is stored by value (TestPartResult is copyable).
+ void Append(const TestPartResult& result);
+
+ // Returns the TestPartResult at the given index (0-based).
+ const TestPartResult& GetTestPartResult(int index) const;
+
+ // Returns the number of TestPartResult objects in the array.
+ int size() const;
+
+ private:
+ std::vector<TestPartResult> array_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
+};
+
+// This interface knows how to report a test part result.
+class GTEST_API_ TestPartResultReporterInterface {
+ public:
+ virtual ~TestPartResultReporterInterface() {}
+
+ // Reports a single test part result. Pure virtual; overridden by
+ // concrete reporters (e.g. internal::HasNewFatalFailureHelper below).
+ virtual void ReportTestPartResult(const TestPartResult& result) = 0;
+};
+
+namespace internal {
+
+// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
+// statement generates new fatal failures. To do so it registers itself as the
+// current test part result reporter. Besides checking if fatal failures were
+// reported, it only delegates the reporting to the former result reporter.
+// The original result reporter is restored in the destructor.
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+class GTEST_API_ HasNewFatalFailureHelper
+ : public TestPartResultReporterInterface {
+ public:
+ HasNewFatalFailureHelper();
+ virtual ~HasNewFatalFailureHelper();
+ virtual void ReportTestPartResult(const TestPartResult& result);
+ // True once a fatal failure has been reported during this helper's lifetime.
+ bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
+ private:
+ bool has_new_fatal_failure_;
+ // The reporter that was current when this helper registered itself;
+ // restored in the destructor (see class comment).
+ TestPartResultReporterInterface* original_reporter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list. You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+ ...
+ typedef std::list<T> List;
+ static T shared_;
+ T value_;
+};
+
+// Next, associate a list of types with the test case, which will be
+// repeated for each type in the list. The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_CASE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// TYPED_TEST_CASE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test case as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ // Since we are inside a derived class template, C++ requires us to
+ // visit the members of FooTest via 'this'.
+ TypeParam n = this->value_;
+
+ // To visit static members of the fixture, add the TestFixture::
+ // prefix.
+ n += TestFixture::shared_;
+
+ // To refer to typedefs in the fixture, add the "typename
+ // TestFixture::" prefix.
+ typename TestFixture::List values;
+ values.push_back(n);
+ ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+#endif // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type. Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are. The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have. Then,
+// each implementation can easily instantiate the test suite to verify
+// that it conforms to the requirements, without having to write
+// similar tests repeatedly. Here's an example:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ ...
+};
+
+// Next, declare that you will define a type-parameterized test case
+// (the _P suffix is for "parameterized" or "pattern", whichever you
+// prefer):
+TYPED_TEST_CASE_P(FooTest);
+
+// Then, use TYPED_TEST_P() to define as many type-parameterized tests
+// for this type-parameterized test case as you want.
+TYPED_TEST_P(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ TypeParam n = 0;
+ ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+
+// Now the tricky part: you need to register all test patterns before
+// you can instantiate them. The first argument of the macro is the
+// test case name; the rest are the names of the tests in this test
+// case.
+REGISTER_TYPED_TEST_CASE_P(FooTest,
+ DoesBlah, HasPropertyA);
+
+// Finally, you are free to instantiate the pattern with the types you
+// want. If you put the above code in a header file, you can #include
+// it in multiple C++ source files and instantiate it multiple times.
+//
+// To distinguish different instances of the pattern, the first
+// argument to the INSTANTIATE_* macro is a prefix that will be added
+// to the actual test case name. Remember to pick unique prefixes for
+// different instances.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
+
+#endif // 0
+
+
+// Implements typed tests.
+
+#if GTEST_HAS_TYPED_TEST
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the typedef for the type parameters of the
+// given test case.
+# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define TYPED_TEST_CASE(CaseName, Types) \
+ typedef ::testing::internal::TypeList< Types >::type \
+ GTEST_TYPE_PARAMS_(CaseName)
+
+// Defines one typed test. The namespace-scope bool below is initialized with
+// the result of TypeParameterizedTest<...>::Register, so the test is
+// registered during static initialization, before RUN_ALL_TESTS() runs.
+# define TYPED_TEST(CaseName, TestName) \
+ template <typename gtest_TypeParam_> \
+ class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
+ : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::internal::TypeParameterizedTest< \
+ CaseName, \
+ ::testing::internal::TemplateSel< \
+ GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
+ GTEST_TYPE_PARAMS_(CaseName)>::Register(\
+ "", #CaseName, #TestName, 0); \
+ template <typename gtest_TypeParam_> \
+ void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
+
+#endif // GTEST_HAS_TYPED_TEST
+
+// Implements type-parameterized tests.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test case are defined in. The exact
+// name of the namespace is subject to change without notice.
+# define GTEST_CASE_NAMESPACE_(TestCaseName) \
+ gtest_case_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test case.
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
+ gtest_typed_test_case_p_state_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test case.
+# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
+ gtest_registered_test_names_##TestCaseName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+# define TYPED_TEST_CASE_P(CaseName) \
+ static ::testing::internal::TypedTestCasePState \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
+
+// Defines one test of a type-parameterized test case; the static bool records
+// the test's name in the case's TypedTestCasePState at static-init time.
+# define TYPED_TEST_P(CaseName, TestName) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ template <typename gtest_TypeParam_> \
+ class TestName : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
+ __FILE__, __LINE__, #CaseName, #TestName); \
+ } \
+ template <typename gtest_TypeParam_> \
+ void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
+
+// Collects the tests named in __VA_ARGS__ into gtest_AllTests_ and has
+// VerifyRegisteredTestNames() check that they match the tests defined with
+// TYPED_TEST_P for this case.
+# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
+ } \
+ static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
+ __FILE__, __LINE__, #__VA_ARGS__)
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
+ bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::internal::TypeParameterizedTestCase<CaseName, \
+ GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
+ ::testing::internal::TypeList< Types >::type>::Register(\
+ #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
+
+#endif // GTEST_HAS_TYPED_TEST_P
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type to ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If the user's ::std::string and ::string are the same class due to
+// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag brings the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter to select by name using a glob pattern
+// the tests to run. If the filter is not given all tests are executed.
+GTEST_DECLARE_string_(filter);
+
+// OpenCV extension: same as filter, but for the parameters string.
+GTEST_DECLARE_string_(param_filter);
+
+// This flag causes Google Test to list tests. None of the tests listed
+// are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1 the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class StreamingListenerTest;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class UnitTestRecordPropertyTestHelper;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const std::string& message);
+
+} // namespace internal
+
+// The friend relationship of some of these classes is cyclic.
+// If we don't forward declare them the compiler might confuse the classes
+// in friendship clauses with same named classes on the scope.
+class Test;
+class TestCase;
+class TestInfo;
+class UnitTest;
+
+// A class for indicating whether an assertion was successful. When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+// 1. Defining predicate functions to be used with Boolean test assertions
+// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+// 2. Defining predicate-format functions to be
+// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+// Value of: IsEven(Fib(5))
+// Actual: false (5 is odd)
+// Expected: true
+//
+// instead of a more opaque
+//
+// Value of: IsEven(Fib(5))
+// Actual: false
+// Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess() << n << " is even";
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+// Value of: IsEven(Fib(6))
+// Actual: true (8 is even)
+// Expected: false
+//
+// NB: Predicates that support negative Boolean assertions have reduced
+// performance in positive ones so be careful not to use them in tests
+// that have lots (tens of thousands) of positive Boolean assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
+//
+// // Verifies that Foo() returns an even number.
+// EXPECT_PRED_FORMAT1(IsEven, Foo());
+//
+// you need to define:
+//
+// testing::AssertionResult IsEven(const char* expr, int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure()
+// << "Expected: " << expr << " is even\n Actual: it's " << n;
+// }
+//
+// If Foo() returns 5, you will see the following message:
+//
+// Expected: Foo() is even
+// Actual: it's 5
+//
+class GTEST_API_ AssertionResult {
+ public:
+ // Copy constructor.
+ // Used in EXPECT_TRUE/FALSE(assertion_result).
+ AssertionResult(const AssertionResult& other);
+ // Used in the EXPECT_TRUE/FALSE(bool_expression).
+ explicit AssertionResult(bool success) : success_(success) {}
+
+ // Returns true iff the assertion succeeded.
+ operator bool() const { return success_; } // NOLINT
+
+ // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+ AssertionResult operator!() const;
+
+ // Returns the text streamed into this AssertionResult. Test assertions
+ // use it when they fail (i.e., the predicate's outcome doesn't match the
+ // assertion's expectation). When nothing has been streamed into the
+ // object, returns an empty string.
+ const char* message() const {
+ return message_.get() != NULL ? message_->c_str() : "";
+ }
+ // TODO(vladl@google.com): Remove this after making sure no clients use it.
+ // Deprecated; please use message() instead.
+ const char* failure_message() const { return message(); }
+
+ // Streams a custom failure message into this object.
+ template <typename T> AssertionResult& operator<<(const T& value) {
+ AppendMessage(Message() << value);
+ return *this;
+ }
+
+ // Allows streaming basic output manipulators such as endl or flush into
+ // this object.
+ AssertionResult& operator<<(
+ ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {
+ AppendMessage(Message() << basic_manipulator);
+ return *this;
+ }
+
+ private:
+ // Appends the contents of message to message_, allocating the underlying
+ // string lazily on the first append.
+ void AppendMessage(const Message& a_message) {
+ if (message_.get() == NULL)
+ message_.reset(new ::std::string);
+ message_->append(a_message.GetString().c_str());
+ }
+
+ // Stores result of the assertion predicate.
+ bool success_;
+ // Stores the message describing the condition in case the expectation
+ // construct is not satisfied with the predicate's outcome.
+ // Referenced via a pointer to avoid taking too much stack frame space
+ // with test assertions.
+ internal::scoped_ptr< ::std::string> message_;
+
+ GTEST_DISALLOW_ASSIGN_(AssertionResult);
+};
+
+// Makes a successful assertion result.
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
+
+// The abstract class that all tests inherit from.
+//
+// In Google Test, a unit test program contains one or many TestCases, and
+// each TestCase contains one or many Tests.
+//
+// When you define a test using the TEST macro, you don't need to
+// explicitly derive from Test - the TEST macro automatically does
+// this for you.
+//
+// The only time you derive from Test is when defining a test fixture
+// to be used in a TEST_F. For example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// virtual void SetUp() { ... }
+// virtual void TearDown() { ... }
+// ...
+// };
+//
+// TEST_F(FooTest, Bar) { ... }
+// TEST_F(FooTest, Baz) { ... }
+//
+// Test is not copyable.
+class GTEST_API_ Test {
+ public:
+ friend class TestInfo;
+
+ // Defines types for pointers to functions that set up and tear down
+ // a test case.
+ typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
+ typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
+
+ // The d'tor is virtual as we intend to inherit from Test.
+ virtual ~Test();
+
+ // Sets up the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::SetUpTestCase() before running the first
+ // test in test case Foo. Hence a sub-class can define its own
+ // SetUpTestCase() method to shadow the one defined in the super
+ // class.
+ static void SetUpTestCase() {}
+
+ // Tears down the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::TearDownTestCase() after running the last
+ // test in test case Foo. Hence a sub-class can define its own
+ // TearDownTestCase() method to shadow the one defined in the super
+ // class.
+ static void TearDownTestCase() {}
+
+ // Returns true iff the current test has a fatal failure.
+ static bool HasFatalFailure();
+
+ // Returns true iff the current test has a non-fatal failure.
+ static bool HasNonfatalFailure();
+
+ // Returns true iff the current test has a (either fatal or
+ // non-fatal) failure.
+ static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
+
+ // Logs a property for the current test, test case, or for the entire
+ // invocation of the test program when used outside of the context of a
+ // test case. Only the last value for a given key is remembered. These
+ // are public static so they can be called from utility functions that are
+ // not members of the test fixture. Calls to RecordProperty made during
+ // lifespan of the test (from the moment its constructor starts to the
+ // moment its destructor finishes) will be output in XML as attributes of
+ // the <testcase> element. Properties recorded from fixture's
+ // SetUpTestCase or TearDownTestCase are logged as attributes of the
+ // corresponding <testsuite> element. Calls to RecordProperty made in the
+ // global context (before or after invocation of RUN_ALL_TESTS and from
+ // SetUp/TearDown method of Environment objects registered with Google
+ // Test) will be output as attributes of the <testsuites> element.
+ static void RecordProperty(const std::string& key, const std::string& value);
+ static void RecordProperty(const std::string& key, int value);
+
+ protected:
+ // Creates a Test object.
+ Test();
+
+ // Sets up the test fixture.
+ virtual void SetUp();
+
+ // Tears down the test fixture.
+ virtual void TearDown();
+
+ private:
+ // Returns true iff the current test has the same fixture class as
+ // the first test in the current test case.
+ static bool HasSameFixtureClass();
+
+ // Runs the test after the test fixture has been set up.
+ //
+ // A sub-class must implement this to define the test logic.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
+ // Instead, use the TEST or TEST_F macro.
+ virtual void TestBody() = 0;
+
+ // Sets up, executes, and tears down the test.
+ void Run();
+
+ // Deletes self. We deliberately pick an unusual name for this
+ // internal method to avoid clashing with names used in user TESTs.
+ void DeleteSelf_() { delete this; }
+
+ // Uses a GTestFlagSaver to save and restore all Google Test flags.
+ const internal::GTestFlagSaver* const gtest_flag_saver_;
+
+ // Often a user mis-spells SetUp() as Setup() and spends a long time
+ // wondering why it is never called by Google Test. The declaration of
+ // the following method is solely for catching such an error at
+ // compile time:
+ //
+ // - The return type is deliberately chosen to be not void, so it
+ // will be a conflict if a user declares void Setup() in his test
+ // fixture.
+ //
+ // - This method is private, so it will be another compiler error
+ // if a user calls it from his test fixture.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION.
+ //
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+
+ // We disallow copying Tests.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
+};
+
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+ // C'tor. TestProperty does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestProperty object.
+ TestProperty(const std::string& a_key, const std::string& a_value) :
+ key_(a_key), value_(a_value) {
+ }
+
+ // Gets the user supplied key.
+ const char* key() const {
+ return key_.c_str();
+ }
+
+ // Gets the user supplied value.
+ const char* value() const {
+ return value_.c_str();
+ }
+
+ // Sets a new value, overriding the one supplied in the constructor.
+ void SetValue(const std::string& new_value) {
+ value_ = new_value;
+ }
+
+ private:
+ // The key supplied by the user. Stored by value (owned copy).
+ std::string key_;
+ // The value supplied by the user. Stored by value (owned copy).
+ std::string value_;
+};
+
+// The result of a single Test. This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+ // Creates an empty TestResult.
+ TestResult();
+
+ // D'tor. Do not inherit from TestResult.
+ ~TestResult();
+
+ // Gets the number of all test parts. This is the sum of the number
+ // of successful test parts and the number of failed test parts.
+ int total_part_count() const;
+
+ // Returns the number of the test properties.
+ int test_property_count() const;
+
+ // Returns true iff the test passed (i.e. no test part failed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test failed.
+ bool Failed() const;
+
+ // Returns true iff the test fatally failed.
+ bool HasFatalFailure() const;
+
+ // Returns true iff the test has a non-fatal failure.
+ bool HasNonfatalFailure() const;
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+  // Returns the i-th test part result among all the results. i can range
+  // from 0 to total_part_count() - 1. If i is not in that range, aborts
+  // the program.
+ const TestPartResult& GetTestPartResult(int i) const;
+
+ // Returns the i-th test property. i can range from 0 to
+ // test_property_count() - 1. If i is not in that range, aborts the
+ // program.
+ const TestProperty& GetTestProperty(int i) const;
+
+ private:
+ friend class TestInfo;
+ friend class TestCase;
+ friend class UnitTest;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::ExecDeathTest;
+ friend class internal::TestResultAccessor;
+ friend class internal::UnitTestImpl;
+ friend class internal::WindowsDeathTest;
+
+ // Gets the vector of TestPartResults.
+ const std::vector<TestPartResult>& test_part_results() const {
+ return test_part_results_;
+ }
+
+ // Gets the vector of TestProperties.
+ const std::vector<TestProperty>& test_properties() const {
+ return test_properties_;
+ }
+
+ // Sets the elapsed time.
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+ // Adds a test property to the list. The property is validated and may add
+ // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+ // key names). If a property is already recorded for the same key, the
+ // value will be updated, rather than storing multiple values for the same
+ // key. xml_element specifies the element for which the property is being
+ // recorded and is used for validation.
+ void RecordProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a failure if the key is a reserved attribute of Google Test
+ // testcase tags. Returns true if the property is valid.
+ // TODO(russr): Validate attribute names are legal and human readable.
+ static bool ValidateTestProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a test part result to the list.
+ void AddTestPartResult(const TestPartResult& test_part_result);
+
+ // Returns the death test count.
+ int death_test_count() const { return death_test_count_; }
+
+ // Increments the death test count, returning the new count.
+ int increment_death_test_count() { return ++death_test_count_; }
+
+ // Clears the test part results.
+ void ClearTestPartResults();
+
+ // Clears the object.
+ void Clear();
+
+ // Protects mutable state of the property vector and of owned
+ // properties, whose values may be updated.
+ internal::Mutex test_properites_mutex_;
+
+ // The vector of TestPartResults
+ std::vector<TestPartResult> test_part_results_;
+ // The vector of TestProperties
+ std::vector<TestProperty> test_properties_;
+ // Running count of death tests.
+ int death_test_count_;
+ // The elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestResult.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+}; // class TestResult
+
+// A TestInfo object stores the following information about a test:
+//
+// Test case name
+// Test name
+// Whether the test should be run
+// A function pointer that creates the test object when invoked
+// Test result
+//
+// The constructor of TestInfo registers itself with the UnitTest
+// singleton such that the RUN_ALL_TESTS() macro knows which tests to
+// run.
+class GTEST_API_ TestInfo {
+ public:
+ // Destructs a TestInfo object. This function is not virtual, so
+ // don't inherit from TestInfo.
+ ~TestInfo();
+
+ // Returns the test case name.
+ const char* test_case_name() const { return test_case_name_.c_str(); }
+
+ // Returns the test name.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a typed
+ // or a type-parameterized test.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns the text representation of the value parameter, or NULL if this
+ // is not a value-parameterized test.
+ const char* value_param() const {
+ if (value_param_.get() != NULL)
+ return value_param_->c_str();
+ return NULL;
+ }
+
+ // Returns true if this test should run, that is if the test is not
+ // disabled (or it is disabled but the also_run_disabled_tests flag has
+ // been specified) and its full name matches the user-specified filter.
+ //
+ // Google Test allows the user to filter the tests by their full names.
+ // The full name of a test Bar in test case Foo is defined as
+ // "Foo.Bar". Only the tests that match the filter will run.
+ //
+ // A filter is a colon-separated list of glob (not regex) patterns,
+ // optionally followed by a '-' and a colon-separated list of
+ // negative patterns (tests to exclude). A test is run if it
+ // matches one of the positive patterns and does not match any of
+ // the negative patterns.
+ //
+ // For example, *A*:Foo.* is a filter that matches any string that
+ // contains the character 'A' or starts with "Foo.".
+ bool should_run() const { return should_run_; }
+
+ // Returns true iff this test will appear in the XML report.
+ bool is_reportable() const {
+ // For now, the XML report includes all tests matching the filter.
+ // In the future, we may trim tests that are excluded because of
+ // sharding.
+ return matches_filter_;
+ }
+
+ // Returns the result of the test.
+ const TestResult* result() const { return &result_; }
+
+ private:
+#if GTEST_HAS_DEATH_TEST
+ friend class internal::DefaultDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+ friend class Test;
+ friend class TestCase;
+ friend class internal::UnitTestImpl;
+ friend class internal::StreamingListenerTest;
+ friend TestInfo* internal::MakeAndRegisterTestInfo(
+ const char* test_case_name,
+ const char* name,
+ const char* type_param,
+ const char* value_param,
+ internal::TypeId fixture_class_id,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ internal::TestFactoryBase* factory);
+
+ // Constructs a TestInfo object. The newly constructed instance assumes
+ // ownership of the factory object.
+ TestInfo(const std::string& test_case_name,
+ const std::string& name,
+ const char* a_type_param, // NULL if not a type-parameterized test
+ const char* a_value_param, // NULL if not a value-parameterized test
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory);
+
+ // Increments the number of death tests encountered in this test so
+ // far.
+ int increment_death_test_count() {
+ return result_.increment_death_test_count();
+ }
+
+ // Creates the test object, runs it, records its result, and then
+ // deletes it.
+ void Run();
+
+ static void ClearTestResult(TestInfo* test_info) {
+ test_info->result_.Clear();
+ }
+
+ // These fields are immutable properties of the test.
+ const std::string test_case_name_; // Test case name
+ const std::string name_; // Test name
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // Text representation of the value parameter, or NULL if this is not a
+ // value-parameterized test.
+ const internal::scoped_ptr<const ::std::string> value_param_;
+ const internal::TypeId fixture_class_id_; // ID of the test fixture class
+ bool should_run_; // True iff this test should run
+ bool is_disabled_; // True iff this test is disabled
+ bool matches_filter_; // True if this test matches the
+ // user-specified filter.
+ internal::TestFactoryBase* const factory_; // The factory that creates
+ // the test object
+
+ // This field is mutable and needs to be reset before running the
+ // test for the second time.
+ TestResult result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
+};
+
+// A test case, which consists of a vector of TestInfos.
+//
+// TestCase is not copyable.
+class GTEST_API_ TestCase {
+ public:
+ // Creates a TestCase with the given name.
+ //
+ // TestCase does NOT have a default constructor. Always use this
+ // constructor to create a TestCase object.
+ //
+ // Arguments:
+ //
+ // name: name of the test case
+ // a_type_param: the name of the test's type parameter, or NULL if
+ // this is not a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase(const char* name, const char* a_type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Destructor of TestCase.
+ virtual ~TestCase();
+
+ // Gets the name of the TestCase.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a
+ // type-parameterized test case.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns true if any test in this test case should run.
+ bool should_run() const { return should_run_; }
+
+ // Gets the number of successful tests in this test case.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests in this test case.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests in this test case.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Get the number of tests in this test case that should run.
+ int test_to_run_count() const;
+
+ // Gets the number of all tests in this test case.
+ int total_test_count() const;
+
+ // Returns true iff the test case passed.
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test case failed.
+ bool Failed() const { return failed_test_count() > 0; }
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ const TestInfo* GetTestInfo(int i) const;
+
+ // Returns the TestResult that holds test properties recorded during
+ // execution of SetUpTestCase and TearDownTestCase.
+ const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; }
+
+ private:
+ friend class Test;
+ friend class internal::UnitTestImpl;
+
+ // Gets the (mutable) vector of TestInfos in this TestCase.
+ std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+ // Gets the (immutable) vector of TestInfos in this TestCase.
+ const std::vector<TestInfo*>& test_info_list() const {
+ return test_info_list_;
+ }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ TestInfo* GetMutableTestInfo(int i);
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Adds a TestInfo to this test case. Will delete the TestInfo upon
+ // destruction of the TestCase object.
+ void AddTestInfo(TestInfo * test_info);
+
+ // Clears the results of all tests in this test case.
+ void ClearResult();
+
+ // Clears the results of all tests in the given test case.
+ static void ClearTestCaseResult(TestCase* test_case) {
+ test_case->ClearResult();
+ }
+
+ // Runs every test in this TestCase.
+ void Run();
+
+ // Runs SetUpTestCase() for this TestCase. This wrapper is needed
+ // for catching exceptions thrown from SetUpTestCase().
+ void RunSetUpTestCase() { (*set_up_tc_)(); }
+
+ // Runs TearDownTestCase() for this TestCase. This wrapper is
+ // needed for catching exceptions thrown from TearDownTestCase().
+ void RunTearDownTestCase() { (*tear_down_tc_)(); }
+
+ // Returns true iff test passed.
+ static bool TestPassed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Passed();
+ }
+
+ // Returns true iff test failed.
+ static bool TestFailed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Failed();
+ }
+
+ // Returns true iff the test is disabled and will be reported in the XML
+ // report.
+ static bool TestReportableDisabled(const TestInfo* test_info) {
+ return test_info->is_reportable() && test_info->is_disabled_;
+ }
+
+ // Returns true iff test is disabled.
+ static bool TestDisabled(const TestInfo* test_info) {
+ return test_info->is_disabled_;
+ }
+
+ // Returns true iff this test will appear in the XML report.
+ static bool TestReportable(const TestInfo* test_info) {
+ return test_info->is_reportable();
+ }
+
+ // Returns true if the given test should run.
+ static bool ShouldRunTest(const TestInfo* test_info) {
+ return test_info->should_run();
+ }
+
+ // Shuffles the tests in this test case.
+ void ShuffleTests(internal::Random* random);
+
+ // Restores the test order to before the first shuffle.
+ void UnshuffleTests();
+
+ // Name of the test case.
+ std::string name_;
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // The vector of TestInfos in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestInfo*> test_info_list_;
+ // Provides a level of indirection for the test list to allow easy
+ // shuffling and restoring the test order. The i-th element in this
+ // vector is the index of the i-th test in the shuffled test list.
+ std::vector<int> test_indices_;
+ // Pointer to the function that sets up the test case.
+ Test::SetUpTestCaseFunc set_up_tc_;
+ // Pointer to the function that tears down the test case.
+ Test::TearDownTestCaseFunc tear_down_tc_;
+ // True iff any test in this test case should run.
+ bool should_run_;
+ // Elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+ // Holds test properties recorded during execution of SetUpTestCase and
+ // TearDownTestCase.
+ TestResult ad_hoc_test_result_;
+
+ // We disallow copying TestCases.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment. The user should subclass this to define his own
+// environment(s).
+//
+// An Environment object does the set-up and tear-down in virtual
+// methods SetUp() and TearDown() instead of the constructor and the
+// destructor, as:
+//
+// 1. You cannot safely throw from a destructor. This is a problem
+// as in some cases Google Test is used where exceptions are enabled, and
+// we may want to implement ASSERT_* using exceptions where they are
+// available.
+// 2. You cannot use ASSERT_* directly in a constructor or
+// destructor.
+class Environment {
+ public:
+ // The d'tor is virtual as we need to subclass Environment.
+ virtual ~Environment() {}
+
+ // Override this to define how to set up the environment.
+ virtual void SetUp() {}
+
+ // Override this to define how to tear down the environment.
+ virtual void TearDown() {}
+ private:
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+};
+
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+ virtual ~TestEventListener() {}
+
+ // Fired before any test activity starts.
+ virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+ // Fired before each iteration of tests starts. There may be more than
+ // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+ // index, starting from 0.
+ virtual void OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired before environment set-up for each iteration of tests starts.
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment set-up for each iteration of tests ends.
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
+
+ // Fired before the test case starts.
+ virtual void OnTestCaseStart(const TestCase& test_case) = 0;
+
+ // Fired before the test starts.
+ virtual void OnTestStart(const TestInfo& test_info) = 0;
+
+ // Fired after a failed assertion or a SUCCEED() invocation.
+ virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
+
+ // Fired after the test ends.
+ virtual void OnTestEnd(const TestInfo& test_info) = 0;
+
+ // Fired after the test case ends.
+ virtual void OnTestCaseEnd(const TestCase& test_case) = 0;
+
+ // Fired before environment tear-down for each iteration of tests starts.
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment tear-down for each iteration of tests ends.
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
+
+ // Fired after each iteration of tests finishes.
+ virtual void OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired after all test activities have ended.
+ virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
+};
+
+// The convenience class for users who need to override just one or two
+// methods, accepting the caveat that a later change to the signature of
+// an overridden method will not be caught during the build.  For
+// comments about each method please see the definition of TestEventListener
+// above.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+ virtual void OnTestStart(const TestInfo& /*test_info*/) {}
+ virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
+ virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
+ virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+};
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+ TestEventListeners();
+ ~TestEventListeners();
+
+ // Appends an event listener to the end of the list. Google Test assumes
+ // the ownership of the listener (i.e. it will delete the listener when
+ // the test program finishes).
+ void Append(TestEventListener* listener);
+
+ // Removes the given event listener from the list and returns it. It then
+ // becomes the caller's responsibility to delete the listener. Returns
+ // NULL if the listener is not found in the list.
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Returns the standard listener responsible for the default console
+ // output. Can be removed from the listeners list to shut down default
+ // console output. Note that removing this object from the listener list
+ // with Release transfers its ownership to the caller and makes this
+ // function return NULL the next time.
+ TestEventListener* default_result_printer() const {
+ return default_result_printer_;
+ }
+
+ // Returns the standard listener responsible for the default XML output
+ // controlled by the --gtest_output=xml flag. Can be removed from the
+ // listeners list by users who want to shut down the default XML output
+ // controlled by this flag and substitute it with custom one. Note that
+ // removing this object from the listener list with Release transfers its
+ // ownership to the caller and makes this function return NULL the next
+ // time.
+ TestEventListener* default_xml_generator() const {
+ return default_xml_generator_;
+ }
+
+ private:
+ friend class TestCase;
+ friend class TestInfo;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::NoExecDeathTest;
+ friend class internal::TestEventListenersAccessor;
+ friend class internal::UnitTestImpl;
+
+ // Returns repeater that broadcasts the TestEventListener events to all
+ // subscribers.
+ TestEventListener* repeater();
+
+ // Sets the default_result_printer attribute to the provided listener.
+ // The listener is also added to the listener list and previous
+ // default_result_printer is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultResultPrinter(TestEventListener* listener);
+
+ // Sets the default_xml_generator attribute to the provided listener. The
+ // listener is also added to the listener list and previous
+ // default_xml_generator is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultXmlGenerator(TestEventListener* listener);
+
+ // Controls whether events will be forwarded by the repeater to the
+ // listeners in the list.
+ bool EventForwardingEnabled() const;
+ void SuppressEventForwarding();
+
+ // The actual list of listeners.
+ internal::TestEventRepeater* repeater_;
+ // Listener responsible for the standard result output.
+ TestEventListener* default_result_printer_;
+ // Listener responsible for the creation of the XML output file.
+ TestEventListener* default_xml_generator_;
+
+ // We disallow copying TestEventListeners.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
+
+// A UnitTest consists of a vector of TestCases.
+//
+// This is a singleton class. The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called. This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+ // Gets the singleton UnitTest object. The first time this method
+ // is called, a UnitTest object is constructed and returned.
+ // Consecutive calls will return the same object.
+ static UnitTest* GetInstance();
+
+ // Runs all tests in this UnitTest object and prints the result.
+ // Returns 0 if successful, or 1 otherwise.
+ //
+ // This method can only be called from the main thread.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ int Run() GTEST_MUST_USE_RESULT_;
+
+ // Returns the working directory when the first TEST() or TEST_F()
+ // was executed. The UnitTest object owns the string.
+ const char* original_working_dir() const;
+
+ // Returns the TestCase object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestCase* current_test_case() const
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Returns the TestInfo object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestInfo* current_test_info() const
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Returns the random seed used at the start of the current test run.
+ int random_seed() const;
+
+#if GTEST_HAS_PARAM_TEST
+ // Returns the ParameterizedTestCaseRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ internal::ParameterizedTestCaseRegistry& parameterized_test_registry()
+ GTEST_LOCK_EXCLUDED_(mutex_);
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the time of the test program start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const;
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const;
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const;
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const;
+
+ // Returns the TestResult containing information on test failures and
+ // properties logged outside of individual test cases.
+ const TestResult& ad_hoc_test_result() const;
+
+ // Returns the list of event listeners that can be used to track events
+ // inside Google Test.
+ TestEventListeners& listeners();
+
+ private:
+ // Registers and returns a global test environment. When a test
+ // program is run, all global test environments will be set-up in
+ // the order they were registered. After all tests in the program
+ // have finished, all global test environments will be torn-down in
+ // the *reverse* order they were registered.
+ //
+ // The UnitTest object takes ownership of the given environment.
+ //
+ // This method can only be called from the main thread.
+ Environment* AddEnvironment(Environment* env);
+
+ // Adds a TestPartResult to the current TestResult object. All
+ // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+ // eventually call this to report their results. The user code
+ // should use the assertion macros instead of calling this directly.
+ void AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const std::string& message,
+ const std::string& os_stack_trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Adds a TestProperty to the current TestResult object when invoked from
+ // inside a test, to current TestCase's ad_hoc_test_result_ when invoked
+ // from SetUpTestCase or TearDownTestCase, or to the global property set
+ // when invoked elsewhere. If the result already contains a property with
+ // the same key, the value will be updated.
+ void RecordProperty(const std::string& key, const std::string& value);
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i);
+
+ // Accessors for the implementation object.
+ internal::UnitTestImpl* impl() { return impl_; }
+ const internal::UnitTestImpl* impl() const { return impl_; }
+
+  // These classes and functions are friends as they need to access private
+ // members of UnitTest.
+ friend class Test;
+ friend class internal::AssertHelper;
+ friend class internal::ScopedTrace;
+ friend class internal::StreamingListenerTest;
+ friend class internal::UnitTestRecordPropertyTestHelper;
+ friend Environment* AddGlobalTestEnvironment(Environment* env);
+ friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+ friend void internal::ReportFailureInUnknownLocation(
+ TestPartResult::Type result_type,
+ const std::string& message);
+
+ // Creates an empty UnitTest.
+ UnitTest();
+
+ // D'tor
+ virtual ~UnitTest();
+
+ // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+ // Google Test trace stack.
+ void PushGTestTrace(const internal::TraceInfo& trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Pops a trace from the per-thread Google Test trace stack.
+ void PopGTestTrace()
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Protects mutable state in *impl_. This is mutable as some const
+ // methods need to lock it too.
+ mutable internal::Mutex mutex_;
+
+ // Opaque implementation object. This field is never changed once
+ // the object is constructed. We don't mark it as const here, as
+ // doing so will cause a warning in the constructor of UnitTest.
+ // Mutable state in *impl_ is protected by mutex_.
+ internal::UnitTestImpl* impl_;
+
+ // We disallow copying UnitTest.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
+
+// A convenient wrapper for adding an environment for the test
+// program.
+//
+// You should call this before RUN_ALL_TESTS() is called, probably in
+// main(). If you use gtest_main, you need to call this before main()
+// starts for it to take effect. For example, you can define a global
+// variable like this:
+//
+// testing::Environment* const foo_env =
+// testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend you to write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+ return UnitTest::GetInstance()->AddEnvironment(env);
+}
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
+
+namespace internal {
+
+// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
+// value of type ToPrint that is an operand of a comparison assertion
+// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in
+// the comparison, and is used to help determine the best way to
+// format the value. In particular, when the value is a C string
+// (char pointer) and the other operand is an STL string object, we
+// want to format the C string as a string, since we know it is
+// compared by value with the string object. If the value is a char
+// pointer but the other operand is not an STL string object, we don't
+// know whether the pointer is supposed to point to a NUL-terminated
+// string, and thus want to print it as a pointer to be safe.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// The default case: nothing is known about the operand types, so we
+// simply delegate to Google Test's universal value printer.
+template <typename ToPrint, typename OtherOperand>
+class FormatForComparison {
+ public:
+ static ::std::string Format(const ToPrint& value) {
+ return ::testing::PrintToString(value);
+ }
+};
+
+// Array. An array operand decays to a pointer when passed, so format
+// it exactly like the corresponding const pointer type by delegating
+// to the FormatForComparison<const ToPrint*, OtherOperand> case.
+template <typename ToPrint, size_t N, typename OtherOperand>
+class FormatForComparison<ToPrint[N], OtherOperand> {
+ public:
+ static ::std::string Format(const ToPrint* value) {
+ return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
+ }
+};
+
+// By default, print C string as pointers to be safe, as we don't know
+// whether they actually point to a NUL-terminated string.
+
+// Expands to a partial specialization of FormatForComparison that
+// prints a CharType* operand as a raw pointer value (via a cast to
+// const void*), regardless of the other operand's type.  Comments
+// cannot appear inside the macro body (they would swallow the
+// line-continuation backslashes).
+#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \
+ template <typename OtherOperand> \
+ class FormatForComparison<CharType*, OtherOperand> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(static_cast<const void*>(value)); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
+
+// If a C string is compared with an STL string object, we know it's meant
+// to point to a NUL-terminated string, and thus can print it as a string.
+
+// Expands to a full specialization of FormatForComparison for the pair
+// (CharType*, OtherStringType): when a C string is compared against a
+// string object, print it as a NUL-terminated string rather than as a
+// pointer.  (No comments inside the macro body — they would swallow
+// the line-continuation backslashes.)
+#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
+ template <> \
+ class FormatForComparison<CharType*, OtherStringType> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(value); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);
+#endif
+
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);
+#endif
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
+#endif
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc)
+// operand to be used in a failure message. The type (but not value)
+// of the other operand may affect the format. This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char* or void*, and print it as a C string when it is compared
+// against an std::string object, for example.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+// Formats 'value' for use in a failure message.  Only the TYPE of the
+// other operand (T2) influences the chosen format — its value is never
+// read, hence the unnamed parameter.
+template <typename T1, typename T2>
+std::string FormatForComparisonFailureMessage(
+ const T1& value, const T2& /* other_operand */) {
+ return FormatForComparison<T1, T2>::Format(value);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+//
+// Returns AssertionSuccess() when expected == actual; otherwise builds
+// an EqFailure in which each operand is formatted with the other
+// operand's type taken into account (see FormatForComparison above).
+// NOTE(review): the trailing 'false' is presumably EqFailure's
+// ignoring_case flag — confirm against EqFailure's declaration.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4389) // Temporarily disables warning on
+ // signed/unsigned mismatch.
+#endif
+
+ if (expected == actual) {
+ return AssertionSuccess();
+ }
+
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ FormatForComparisonFailureMessage(expected, actual),
+ FormatForComparisonFailureMessage(actual, expected),
+ false);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual);
+
+// The helper class for {ASSERT|EXPECT}_EQ. The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal. The following default implementation is
+// for lhs_is_null_literal being false.
+// Primary template: used when the first ASSERT_EQ/EXPECT_EQ argument
+// is NOT a null pointer literal.  Both overloads simply forward to
+// CmpHelperEQ.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+ // This templatized version is for the general case.
+ template <typename T1, typename T2>
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // With this overloaded version, we allow anonymous enums to be used
+ // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+ // enums can be implicitly cast to BiggestInt.
+ //
+ // Even though its body looks the same as the above version, we
+ // cannot merge the two, as it will make anonymous enums unhappy.
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+// Specialization for a null-pointer-literal first argument.  Overload
+// resolution (EnableIf vs. the Secret* parameter) routes pointer vs.
+// non-pointer second arguments to the right Compare() below.
+template <>
+class EqHelper<true> {
+ public:
+ // We define two overloaded versions of Compare(). The first
+ // version will be picked when the second argument to ASSERT_EQ() is
+ // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+ // EXPECT_EQ(false, a_bool).
+ template <typename T1, typename T2>
+ static AssertionResult Compare(
+ const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual,
+ // The following line prevents this overload from being considered if T2
+ // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr)
+ // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+ // to match the Secret* in the other overload, which would otherwise make
+ // this template match better.
+ typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // This version will be picked when the second argument to ASSERT_EQ() is a
+ // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+ template <typename T>
+ static AssertionResult Compare(
+ const char* expected_expression,
+ const char* actual_expression,
+ // We used to have a second template parameter instead of Secret*. That
+ // template parameter would deduce to 'long', making this a better match
+ // than the first overload even without the first overload's EnableIf.
+ // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+ // non-pointer argument" (even a deduced integral argument), so the old
+ // implementation caused warnings in user code.
+ Secret* /* expected (NULL) */,
+ T* actual) {
+ // We already know that 'expected' is a null pointer.
+ return CmpHelperEQ(expected_expression, actual_expression,
+ static_cast<T*>(NULL), actual);
+ }
+};
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+// Note: each expansion both DEFINES the templatized CmpHelper##op_name
+// and then DECLARES (without a body) a BiggestInt overload; the
+// declaration's terminating semicolon is supplied at the expansion
+// site (see the GTEST_IMPL_CMP_HELPER_(NE, !=); lines below).
+// Comments cannot appear inside the macro body — they would swallow
+// the line-continuation backslashes.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ const T1& val1, const T2& val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ return AssertionFailure() \
+ << "Expected: (" << expr1 << ") " #op " (" << expr2\
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+ }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+ const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=);
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=);
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, <);
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=);
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, >);
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const wchar_t* expected,
+ const wchar_t* actual);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+} // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves. They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif // GTEST_HAS_STD_WSTRING
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+// Compares two floating-point values for near-equality via the
+// FloatingPoint<RawType> wrapper's AlmostEquals() (NOTE(review):
+// presumably a ULP-based comparison — confirm in gtest-internal.h).
+// On failure, both values are printed with digits10 + 2 significant
+// digits so that values which compare unequal also print differently.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
+ const char* actual_expression,
+ RawType expected,
+ RawType actual) {
+ const FloatingPoint<RawType> lhs(expected), rhs(actual);
+
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ ::std::stringstream expected_ss;
+ expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << expected;
+
+ ::std::stringstream actual_ss;
+ actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << actual;
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ StringStreamToString(&expected_ss),
+ StringStreamToString(&actual_ss),
+ false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+ // Constructor.  Captures the failure type, source location, and
+ // message to report when operator=(Message) fires.
+ AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message);
+ ~AssertHelper();
+
+ // Message assignment is a semantic trick to enable assertion
+ // streaming; see the GTEST_MESSAGE_ macro below.
+ void operator=(const Message& message) const;
+
+ private:
+ // We put our data in a struct so that the size of the AssertHelper class can
+ // be as small as possible. This is important because gcc is incapable of
+ // re-using stack space even for temporary variables, so every EXPECT_EQ
+ // reserves stack space for another AssertHelper.
+ struct AssertHelperData {
+ AssertHelperData(TestPartResult::Type t,
+ const char* srcfile,
+ int line_num,
+ const char* msg)
+ : type(t), file(srcfile), line(line_num), message(msg) { }
+
+ TestPartResult::Type const type;
+ const char* const file;
+ int const line;
+ std::string const message;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+ };
+
+ // Allocated in the constructor; const pointer, mutable pointee.
+ AssertHelperData* const data_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+} // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface. In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+// protected:
+// FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual ~FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual void SetUp() {
+// // Can use GetParam() here.
+// }
+// virtual void TearDown() {
+// // Can use GetParam() here.
+// }
+// };
+// TEST_P(FooTest, DoesBar) {
+// // Can use GetParam() method here.
+// Foo foo;
+// ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+ typedef T ParamType;
+ virtual ~WithParamInterface() {}
+
+ // The current parameter value. Is also available in the test fixture's
+ // constructor. This member function is non-static, even though it only
+ // references static data, to reduce the opportunity for incorrect uses
+ // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+ // uses a fixture whose parameter type is int.
+ const ParamType& GetParam() const {
+ GTEST_CHECK_(parameter_ != NULL)
+ << "GetParam() can only be called inside a value-parameterized test "
+ << "-- did you intend to write TEST_P instead of TEST_F?";
+ return *parameter_;
+ }
+
+ private:
+ // Sets parameter value. The caller is responsible for making sure the value
+ // remains alive and unchanged throughout the current test.
+ static void SetParam(const ParamType* parameter) {
+ parameter_ = parameter;
+ }
+
+ // Static value used for accessing parameter during a test lifetime.
+ // Defined (initialized to NULL) just after this class template.
+ static const ParamType* parameter_;
+
+ // TestClass must be a subclass of WithParamInterface<T> and Test.
+ // The factory befriends us so it can call the private SetParam().
+ template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+// Convenience base class combining Test and WithParamInterface<T>, so
+// a typical value-parameterized fixture needs only one base class.
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied. If not,
+// it behaves like ADD_FAILURE. In particular:
+//
+// EXPECT_TRUE verifies that a Boolean condition is true.
+// EXPECT_FALSE verifies that a Boolean condition is false.
+//
+// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
+// that they will also abort the current function on failure. People
+// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
+// writing data-driven tests often find themselves using ADD_FAILURE
+// and EXPECT_* more.
+
+// Generates a nonfatal failure with a generic message.
+#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
+
+// Generates a nonfatal failure at the given source file location with
+// a generic message.
+#define ADD_FAILURE_AT(file, line) \
+ GTEST_MESSAGE_AT_(file, line, "Failed", \
+ ::testing::TestPartResult::kNonFatalFailure)
+
+// Generates a fatal failure with a generic message.
+#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
+
+// Define this macro to 1 to omit the definition of FAIL(), which is a
+// generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_FAIL
+# define FAIL() GTEST_FAIL()
+#endif
+
+// Generates a success with a generic message.
+#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
+
+// Define this macro to 1 to omit the definition of SUCCEED(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_SUCCEED
+# define SUCCEED() GTEST_SUCCEED()
+#endif
+
+// Macros for testing exceptions.
+//
+// * {ASSERT|EXPECT}_THROW(statement, expected_exception):
+// Tests that the statement throws the expected exception.
+// * {ASSERT|EXPECT}_NO_THROW(statement):
+// Tests that the statement doesn't throw any exception.
+// * {ASSERT|EXPECT}_ANY_THROW(statement):
+// Tests that the statement throws an exception.
+
+#define EXPECT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
+#define ASSERT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
+#define ASSERT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
+
+// Boolean assertions. Condition can be either a Boolean expression or an
+// AssertionResult. For more information on how to use AssertionResult with
+// these macros see comments on that class.
+#define EXPECT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_NONFATAL_FAILURE_)
+#define EXPECT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_NONFATAL_FAILURE_)
+#define ASSERT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_FATAL_FAILURE_)
+#define ASSERT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_FATAL_FAILURE_)
+
+// Includes the auto-generated header that implements a family of
+// generic predicate assertion macros.
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command
+// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND!
+//
+// Implements a family of generic predicate assertion macros.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Makes sure this header is not included before gtest.h.
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
+
+// This header implements a family of generic predicate assertion
+// macros:
+//
+// ASSERT_PRED_FORMAT1(pred_format, v1)
+// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
+// ...
+//
+// where pred_format is a function or functor that takes n (in the
+// case of ASSERT_PRED_FORMATn) values and their source expression
+// text, and returns a testing::AssertionResult. See the definition
+// of ASSERT_EQ in gtest.h for an example.
+//
+// If you don't care about formatting, you can use the more
+// restrictive version:
+//
+// ASSERT_PRED1(pred, v1)
+// ASSERT_PRED2(pred, v1, v2)
+// ...
+//
+// where pred is an n-ary function or functor that returns bool,
+// and the values v1, v2, ..., must support the << operator for
+// streaming to std::ostream.
+//
+// We also define the EXPECT_* variations.
+//
+// For now we only support predicates whose arity is at most 5.
+// Please email googletestframework@googlegroups.com if you need
+// support for higher arities.
+
+// GTEST_ASSERT_ is the basic statement to which all of the assertions
+// in this file reduce. Don't use this in your code.
+
+// The if/else shape makes the macro safe inside an unbraced if/else in
+// user code: when the AssertionResult converts to true, the empty
+// statement runs; otherwise the else branch reports the failure via
+// on_failure (e.g. GTEST_FATAL_FAILURE_).  GTEST_AMBIGUOUS_ELSE_BLOCKER_
+// suppresses the dangling-else warning.  (No comments inside the macro
+// body — they would swallow the continuation backslashes.)
+#define GTEST_ASSERT_(expression, on_failure) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar = (expression)) \
+ ; \
+ else \
+ on_failure(gtest_ar.failure_message())
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+// pred_text and e1 are the stringified predicate and argument
+// expressions; v1 must support operator<< to std::ostream so it can be
+// rendered in the failure message.
+template <typename Pred,
+ typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+ const char* e1,
+ Pred pred,
+ const T1& v1) {
+ if (pred(v1)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, v1), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+ #v1, \
+ pred, \
+ v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+// Binary version of AssertPred1Helper above: eN are the stringified
+// argument expressions; each vN must be streamable to std::ostream.
+template <typename Pred,
+ typename T1,
+ typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ Pred pred,
+ const T1& v1,
+ const T2& v2) {
+ if (pred(v1, v2)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+ #v1, \
+ #v2, \
+ pred, \
+ v1, \
+ v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+// Ternary version of AssertPred1Helper above: eN are the stringified
+// argument expressions; each vN must be streamable to std::ostream.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3) {
+ if (pred(v1, v2, v3)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ pred, \
+ v1, \
+ v2, \
+ v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+// 4-ary version of AssertPred1Helper above: eN are the stringified
+// argument expressions; each vN must be streamable to std::ostream.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4) {
+ if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+// 5-ary version of AssertPred1Helper above (the maximum arity this
+// generated header supports): eN are the stringified argument
+// expressions; each vN must be streamable to std::ostream.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ const char* e5,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4,
+ const T5& v5) {
+ if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ", "
+ << e5 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4
+ << "\n" << e5 << " evaluates to " << v5;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ #v5, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4, \
+ v5), on_failure)
+
+// 5-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+
+
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Macros for testing equalities and inequalities.
+//
+// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual
+// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2
+// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2
+// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2
+// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2
+// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2
+//
+// When they are not, Google Test prints both the tested expressions and
+// their actual values. The values must be compatible built-in types,
+// or you will get a compiler error. By "compatible" we mean that the
+// values can be compared by the respective operator.
+//
+// Note:
+//
+// 1. It is possible to make a user-defined type work with
+// {ASSERT|EXPECT}_??(), but that requires overloading the
+// comparison operators and is thus discouraged by the Google C++
+// Usage Guide. Therefore, you are advised to use the
+// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
+// equal.
+//
+// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
+// pointers (in particular, C strings). Therefore, if you use it
+// with two C strings, you are testing how their locations in memory
+// are related, not how their content is related. To compare two C
+// strings by content, use {ASSERT|EXPECT}_STR*().
+//
+// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to
+// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you
+// what the actual value is when it fails, and similarly for the
+// other comparisons.
+//
+// 4. Do not depend on the order in which {ASSERT|EXPECT}_??()
+// evaluate their arguments, which is undefined.
+//
+// 5. These macros evaluate their arguments exactly once.
+//
+// Examples:
+//
+// EXPECT_NE(5, Foo());
+// EXPECT_EQ(NULL, a_pointer);
+// ASSERT_LT(i, array_size);
+// ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+ expected, actual)
+#define EXPECT_NE(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
+#define EXPECT_LE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+ expected, actual)
+#define GTEST_ASSERT_NE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
+
+// C-string Comparisons. All tests treat NULL and any non-NULL string
+// as different. Two NULLs are equal.
+//
+// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2
+// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2
+// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
+
+#define EXPECT_STREQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define EXPECT_STRNE(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define EXPECT_STRCASENE(s1, s2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define ASSERT_STRNE(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define ASSERT_STRCASENE(s1, s2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+// Macros for comparing floating-point numbers.
+//
+// * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
+// Tests that two float values are almost equal.
+// * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
+// Tests that two double values are almost equal.
+// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+// Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands. See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
+
+#define EXPECT_FLOAT_EQ(expected, actual)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ expected, actual)
+
+#define EXPECT_DOUBLE_EQ(expected, actual)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ expected, actual)
+
+#define ASSERT_FLOAT_EQ(expected, actual)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ expected, actual)
+
+#define ASSERT_DOUBLE_EQ(expected, actual)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ expected, actual)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+ EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+ ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success, these are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+// EXPECT_NO_FATAL_FAILURE(Process());
+// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope. The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
+#define SCOPED_TRACE(message) \
+ ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+ __FILE__, __LINE__, ::testing::Message() << (message))
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type. The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template. This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated. For example, given:
+//
+// template <typename T> class Foo {
+// public:
+// void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+// };
+//
+// the code:
+//
+// void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated. Instead, you need:
+//
+// void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+ (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+ return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test". For
+// example, a test case for the Foo class can be named FooTest.
+//
+// The user should put his test code between braces after using this
+// macro. Example:
+//
+// TEST(FooTest, InitializesCorrectly) {
+// Foo foo;
+// EXPECT_TRUE(foo.StatusIsOK());
+// }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test. This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X. The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code. GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+ GTEST_TEST_(test_case_name, test_name, \
+ ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name. The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier. The user should put
+// his test code between braces after using this macro. Example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// virtual void SetUp() { b_.AddElement(3); }
+//
+// Foo a_;
+// Foo b_;
+// };
+//
+// TEST_F(FooTest, InitializesCorrectly) {
+// EXPECT_TRUE(a_.StatusIsOK());
+// }
+//
+// TEST_F(FooTest, ReturnsElementCountCorrectly) {
+// EXPECT_EQ(0, a_.size());
+// EXPECT_EQ(1, b_.size());
+// }
+
+#define TEST_F(test_fixture, test_name)\
+ GTEST_TEST_(test_fixture, test_name, test_fixture, \
+ ::testing::internal::GetTypeId<test_fixture>())
+
+} // namespace testing
+
+// Use this function in main() to run all tests. It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+//
+// This function was formerly a macro; thus, it is in the global
+// namespace and has an all-caps name.
+int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;
+
+inline int RUN_ALL_TESTS() {
+ return ::testing::UnitTest::GetInstance()->Run();
+}
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_perf.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_perf.hpp
new file mode 100644
index 00000000..18309061
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/ts/ts_perf.hpp
@@ -0,0 +1,618 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_TS_PERF_HPP__
+#define __OPENCV_TS_PERF_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "ts_gtest.h"
+
+#ifdef HAVE_TBB
+#include "tbb/task_scheduler_init.h"
+#endif
+
+#if !(defined(LOGD) || defined(LOGI) || defined(LOGW) || defined(LOGE))
+# if defined(ANDROID) && defined(USE_ANDROID_LOGGING)
+# include <android/log.h>
+
+# define PERF_TESTS_LOG_TAG "OpenCV_perf"
+# define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, PERF_TESTS_LOG_TAG, __VA_ARGS__))
+# define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, PERF_TESTS_LOG_TAG, __VA_ARGS__))
+# define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, PERF_TESTS_LOG_TAG, __VA_ARGS__))
+# define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, PERF_TESTS_LOG_TAG, __VA_ARGS__))
+# else
+# define LOGD(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
+# define LOGI(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
+# define LOGW(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
+# define LOGE(_str, ...) do{printf(_str , ## __VA_ARGS__); printf("\n");fflush(stdout);} while(0)
+# endif
+#endif
+
+// declare major namespaces to avoid errors on unknown namespace
+namespace cv { namespace gpu {} namespace ocl {} }
+
+namespace perf
+{
+class TestBase;
+
+/*****************************************************************************************\
+* Predefined typical frame sizes and typical test parameters *
+\*****************************************************************************************/
+const cv::Size szQVGA = cv::Size(320, 240);
+const cv::Size szVGA = cv::Size(640, 480);
+const cv::Size szSVGA = cv::Size(800, 600);
+const cv::Size szXGA = cv::Size(1024, 768);
+const cv::Size szSXGA = cv::Size(1280, 1024);
+const cv::Size szWQHD = cv::Size(2560, 1440);
+
+const cv::Size sznHD = cv::Size(640, 360);
+const cv::Size szqHD = cv::Size(960, 540);
+const cv::Size sz240p = szQVGA;
+const cv::Size sz720p = cv::Size(1280, 720);
+const cv::Size sz1080p = cv::Size(1920, 1080);
+const cv::Size sz1440p = szWQHD;
+const cv::Size sz2160p = cv::Size(3840, 2160);//UHDTV1 4K
+const cv::Size sz4320p = cv::Size(7680, 4320);//UHDTV2 8K
+
+const cv::Size sz3MP = cv::Size(2048, 1536);
+const cv::Size sz5MP = cv::Size(2592, 1944);
+const cv::Size sz2K = cv::Size(2048, 2048);
+
+const cv::Size szODD = cv::Size(127, 61);
+
+const cv::Size szSmall24 = cv::Size(24, 24);
+const cv::Size szSmall32 = cv::Size(32, 32);
+const cv::Size szSmall64 = cv::Size(64, 64);
+const cv::Size szSmall128 = cv::Size(128, 128);
+
+#define SZ_ALL_VGA ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA)
+#define SZ_ALL_GA ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA, ::perf::szXGA, ::perf::szSXGA)
+#define SZ_ALL_HD ::testing::Values(::perf::sznHD, ::perf::szqHD, ::perf::sz720p, ::perf::sz1080p)
+#define SZ_ALL_SMALL ::testing::Values(::perf::szSmall24, ::perf::szSmall32, ::perf::szSmall64, ::perf::szSmall128)
+#define SZ_ALL ::testing::Values(::perf::szQVGA, ::perf::szVGA, ::perf::szSVGA, ::perf::szXGA, ::perf::szSXGA, ::perf::sznHD, ::perf::szqHD, ::perf::sz720p, ::perf::sz1080p)
+#define SZ_TYPICAL ::testing::Values(::perf::szVGA, ::perf::szqHD, ::perf::sz720p, ::perf::szODD)
+
+
+#define TYPICAL_MAT_SIZES ::perf::szVGA, ::perf::sz720p, ::perf::sz1080p, ::perf::szODD
+#define TYPICAL_MAT_TYPES CV_8UC1, CV_8UC4, CV_32FC1
+#define TYPICAL_MATS testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( TYPICAL_MAT_TYPES ) )
+#define TYPICAL_MATS_C1 testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( CV_8UC1, CV_32FC1 ) )
+#define TYPICAL_MATS_C4 testing::Combine( testing::Values( TYPICAL_MAT_SIZES ), testing::Values( CV_8UC4 ) )
+
+
+/*****************************************************************************************\
+* MatType - printable wrapper over integer 'type' of Mat *
+\*****************************************************************************************/
+class MatType
+{
+public:
+ MatType(int val=0) : _type(val) {}
+ operator int() const {return _type;}
+
+private:
+ int _type;
+};
+
+/*****************************************************************************************\
+* CV_ENUM and CV_FLAGS - macro to create printable wrappers for defines and enums *
+\*****************************************************************************************/
+
+#define CV_ENUM(class_name, ...) \
+ namespace { \
+ struct class_name { \
+ class_name(int val = 0) : val_(val) {} \
+ operator int() const { return val_; } \
+ void PrintTo(std::ostream* os) const { \
+ using namespace cv;using namespace cv::gpu; using namespace cv::ocl; \
+ const int vals[] = { __VA_ARGS__ }; \
+ const char* svals = #__VA_ARGS__; \
+ for(int i = 0, pos = 0; i < (int)(sizeof(vals)/sizeof(int)); ++i) { \
+ while(isspace(svals[pos]) || svals[pos] == ',') ++pos; \
+ int start = pos; \
+ while(!(isspace(svals[pos]) || svals[pos] == ',' || svals[pos] == 0)) \
+ ++pos; \
+ if (val_ == vals[i]) { \
+ *os << std::string(svals + start, svals + pos); \
+ return; \
+ } \
+ } \
+ *os << "UNKNOWN"; \
+ } \
+ static ::testing::internal::ParamGenerator<class_name> all() { \
+ using namespace cv;using namespace cv::gpu; using namespace cv::ocl; \
+ static class_name vals[] = { __VA_ARGS__ }; \
+ return ::testing::ValuesIn(vals); \
+ } \
+ private: int val_; \
+ }; \
+ inline void PrintTo(const class_name& t, std::ostream* os) { t.PrintTo(os); } }
+
+#define CV_FLAGS(class_name, ...) \
+ namespace { \
+ struct class_name { \
+ class_name(int val = 0) : val_(val) {} \
+ operator int() const { return val_; } \
+ void PrintTo(std::ostream* os) const { \
+ using namespace cv;using namespace cv::gpu; using namespace cv::ocl; \
+ const int vals[] = { __VA_ARGS__ }; \
+ const char* svals = #__VA_ARGS__; \
+ int value = val_; \
+ bool first = true; \
+ for(int i = 0, pos = 0; i < (int)(sizeof(vals)/sizeof(int)); ++i) { \
+ while(isspace(svals[pos]) || svals[pos] == ',') ++pos; \
+ int start = pos; \
+ while(!(isspace(svals[pos]) || svals[pos] == ',' || svals[pos] == 0)) \
+ ++pos; \
+ if ((value & vals[i]) == vals[i]) { \
+ value &= ~vals[i]; \
+ if (first) first = false; else *os << "|"; \
+ *os << std::string(svals + start, svals + pos); \
+ if (!value) return; \
+ } \
+ } \
+ if (first) *os << "UNKNOWN"; \
+ } \
+ private: int val_; \
+ }; \
+ inline void PrintTo(const class_name& t, std::ostream* os) { t.PrintTo(os); } }
+
+CV_ENUM(MatDepth, CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F, CV_USRTYPE1)
+
+/*****************************************************************************************\
+* Regression control utility for performance testing *
+\*****************************************************************************************/
+enum ERROR_TYPE
+{
+ ERROR_ABSOLUTE = 0,
+ ERROR_RELATIVE = 1
+};
+
+class CV_EXPORTS Regression
+{
+public:
+ static Regression& add(TestBase* test, const std::string& name, cv::InputArray array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
+ static Regression& addKeypoints(TestBase* test, const std::string& name, const std::vector<cv::KeyPoint>& array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
+ static Regression& addMatches(TestBase* test, const std::string& name, const std::vector<cv::DMatch>& array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
+ static void Init(const std::string& testSuitName, const std::string& ext = ".xml");
+
+ Regression& operator() (const std::string& name, cv::InputArray array, double eps = DBL_EPSILON, ERROR_TYPE err = ERROR_ABSOLUTE);
+
+private:
+ static Regression& instance();
+ Regression();
+ ~Regression();
+
+ Regression(const Regression&);
+ Regression& operator=(const Regression&);
+
+ cv::RNG regRNG;//own random numbers generator to make collection and verification work identical
+ std::string storageInPath;
+ std::string storageOutPath;
+ cv::FileStorage storageIn;
+ cv::FileStorage storageOut;
+ cv::FileNode rootIn;
+ std::string currentTestNodeName;
+ std::string suiteName;
+
+ cv::FileStorage& write();
+
+ static std::string getCurrentTestNodeName();
+ static bool isVector(cv::InputArray a);
+ static double getElem(cv::Mat& m, int x, int y, int cn = 0);
+
+ void init(const std::string& testSuitName, const std::string& ext);
+ void write(cv::InputArray array);
+ void write(cv::Mat m);
+ void verify(cv::FileNode node, cv::InputArray array, double eps, ERROR_TYPE err);
+ void verify(cv::FileNode node, cv::Mat actual, double eps, std::string argname, ERROR_TYPE err);
+};
+
+#define SANITY_CHECK(array, ...) ::perf::Regression::add(this, #array, array , ## __VA_ARGS__)
+#define SANITY_CHECK_KEYPOINTS(array, ...) ::perf::Regression::addKeypoints(this, #array, array , ## __VA_ARGS__)
+#define SANITY_CHECK_MATCHES(array, ...) ::perf::Regression::addMatches(this, #array, array , ## __VA_ARGS__)
+#define SANITY_CHECK_NOTHING() this->setVerified()
+
+class CV_EXPORTS GpuPerf
+{
+public:
+ static bool targetDevice();
+};
+
+#define PERF_RUN_GPU() ::perf::GpuPerf::targetDevice()
+
+/*****************************************************************************************\
+* Container for performance metrics *
+\*****************************************************************************************/
+typedef struct CV_EXPORTS performance_metrics
+{
+ size_t bytesIn;
+ size_t bytesOut;
+ unsigned int samples;
+ unsigned int outliers;
+ double gmean;
+ double gstddev;//stddev for log(time)
+ double mean;
+ double stddev;
+ double median;
+ double min;
+ double frequency;
+ int terminationReason;
+
+ enum
+ {
+ TERM_ITERATIONS = 0,
+ TERM_TIME = 1,
+ TERM_INTERRUPT = 2,
+ TERM_EXCEPTION = 3,
+ TERM_SKIP_TEST = 4, // there are some limitations and test should be skipped
+ TERM_UNKNOWN = -1
+ };
+
+ performance_metrics();
+ void clear();
+} performance_metrics;
+
+
+/*****************************************************************************************\
+* Strategy for performance measuring *
+\*****************************************************************************************/
+enum PERF_STRATEGY
+{
+ PERF_STRATEGY_BASE = 0,
+ PERF_STRATEGY_SIMPLE = 1
+};
+
+
+/*****************************************************************************************\
+* Base fixture for performance tests *
+\*****************************************************************************************/
+class CV_EXPORTS TestBase: public ::testing::Test
+{
+public:
+ TestBase();
+
+ static void Init(int argc, const char* const argv[]);
+ static void Init(const std::vector<std::string> & availableImpls,
+ int argc, const char* const argv[]);
+ static void RecordRunParameters();
+ static std::string getDataPath(const std::string& relativePath);
+ static std::string getSelectedImpl();
+
+ static enum PERF_STRATEGY getPerformanceStrategy();
+ static enum PERF_STRATEGY setPerformanceStrategy(enum PERF_STRATEGY strategy);
+
+ class PerfSkipTestException: public cv::Exception {};
+
+protected:
+ virtual void PerfTestBody() = 0;
+
+ virtual void SetUp();
+ virtual void TearDown();
+
+ void startTimer();
+ void stopTimer();
+ bool next();
+
+ //_declareHelper declare;
+
+ enum
+ {
+ WARMUP_READ,
+ WARMUP_WRITE,
+ WARMUP_RNG,
+ WARMUP_NONE
+ };
+
+ void reportMetrics(bool toJUnitXML = false);
+ static void warmup(cv::InputOutputArray a, int wtype = WARMUP_READ);
+
+ performance_metrics& calcMetrics();
+ void RunPerfTestBody();
+private:
+ typedef std::vector<std::pair<int, cv::Size> > SizeVector;
+ typedef std::vector<int64> TimeVector;
+
+ SizeVector inputData;
+ SizeVector outputData;
+ unsigned int getTotalInputSize() const;
+ unsigned int getTotalOutputSize() const;
+
+ TimeVector times;
+ int64 lastTime;
+ int64 totalTime;
+ int64 timeLimit;
+ static int64 timeLimitDefault;
+ static unsigned int iterationsLimitDefault;
+
+ unsigned int nIters;
+ unsigned int currentIter;
+ unsigned int runsPerIteration;
+
+ performance_metrics metrics;
+ void validateMetrics();
+
+ static int64 _timeadjustment;
+ static int64 _calibrate();
+
+ static void warmup_impl(cv::Mat m, int wtype);
+ static int getSizeInBytes(cv::InputArray a);
+ static cv::Size getSize(cv::InputArray a);
+ static void declareArray(SizeVector& sizes, cv::InputOutputArray a, int wtype = 0);
+
+ class CV_EXPORTS _declareHelper
+ {
+ public:
+ _declareHelper& in(cv::InputOutputArray a1, int wtype = WARMUP_READ);
+ _declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, int wtype = WARMUP_READ);
+ _declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, int wtype = WARMUP_READ);
+ _declareHelper& in(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, int wtype = WARMUP_READ);
+
+ _declareHelper& out(cv::InputOutputArray a1, int wtype = WARMUP_WRITE);
+ _declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, int wtype = WARMUP_WRITE);
+ _declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, int wtype = WARMUP_WRITE);
+ _declareHelper& out(cv::InputOutputArray a1, cv::InputOutputArray a2, cv::InputOutputArray a3, cv::InputOutputArray a4, int wtype = WARMUP_WRITE);
+
+ _declareHelper& iterations(unsigned int n);
+ _declareHelper& time(double timeLimitSecs);
+ _declareHelper& tbb_threads(int n = -1);
+ _declareHelper& runs(unsigned int runsNumber);
+ private:
+ TestBase* test;
+ _declareHelper(TestBase* t);
+ _declareHelper(const _declareHelper&);
+ _declareHelper& operator=(const _declareHelper&);
+ friend class TestBase;
+ };
+ friend class _declareHelper;
+
+ bool verified;
+
+public:
+ _declareHelper declare;
+
+ void setVerified() { this->verified = true; }
+};
+
+template<typename T> class TestBaseWithParam: public TestBase, public ::testing::WithParamInterface<T> {};
+
+typedef std::tr1::tuple<cv::Size, MatType> Size_MatType_t;
+typedef TestBaseWithParam<Size_MatType_t> Size_MatType;
+
+/*****************************************************************************************\
+* Print functions for googletest *
+\*****************************************************************************************/
+CV_EXPORTS void PrintTo(const MatType& t, std::ostream* os);
+
+} //namespace perf
+
+namespace cv
+{
+
+CV_EXPORTS void PrintTo(const Size& sz, ::std::ostream* os);
+
+} //namespace cv
+
+
+/*****************************************************************************************\
+* Macro definitions for performance tests *
+\*****************************************************************************************/
+#define PERF_PROXY_NAMESPACE_NAME_(test_case_name, test_name) \
+ test_case_name##_##test_name##_perf_namespace_proxy
+
+// Defines a performance test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The user should put his test code between braces after using this
+// macro. Example:
+//
+// PERF_TEST(FooTest, InitializesCorrectly) {
+// Foo foo;
+// EXPECT_TRUE(foo.StatusIsOK());
+// }
+#define PERF_TEST(test_case_name, test_name)\
+ namespace PERF_PROXY_NAMESPACE_NAME_(test_case_name, test_name) {\
+ class TestBase {/*compile error for this class means that you are trying to use perf::TestBase as a fixture*/};\
+ class test_case_name : public ::perf::TestBase {\
+ public:\
+ test_case_name() {}\
+ protected:\
+ virtual void PerfTestBody();\
+ };\
+ TEST_F(test_case_name, test_name){ RunPerfTestBody(); }\
+ }\
+ void PERF_PROXY_NAMESPACE_NAME_(test_case_name, test_name)::test_case_name::PerfTestBody()
+
+// Defines a performance test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name. The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier. The user should put
+// his test code between braces after using this macro. Example:
+//
+// class FooTest : public ::perf::TestBase {
+// protected:
+// virtual void SetUp() { TestBase::SetUp(); b_.AddElement(3); }
+//
+// Foo a_;
+// Foo b_;
+// };
+//
+// PERF_TEST_F(FooTest, InitializesCorrectly) {
+// EXPECT_TRUE(a_.StatusIsOK());
+// }
+//
+// PERF_TEST_F(FooTest, ReturnsElementCountCorrectly) {
+// EXPECT_EQ(0, a_.size());
+// EXPECT_EQ(1, b_.size());
+// }
+#define PERF_TEST_F(fixture, testname) \
+ namespace PERF_PROXY_NAMESPACE_NAME_(fixture, testname) {\
+ class TestBase {/*compile error for this class means that you are trying to use perf::TestBase as a fixture*/};\
+ class fixture : public ::fixture {\
+ public:\
+ fixture() {}\
+ protected:\
+ virtual void PerfTestBody();\
+ };\
+ TEST_F(fixture, testname){ RunPerfTestBody(); }\
+ }\
+ void PERF_PROXY_NAMESPACE_NAME_(fixture, testname)::fixture::PerfTestBody()
+
+// Defines a parametrized performance test.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name. The second parameter is the
+// name of the test within the test case.
+//
+// The user should put his test code between braces after using this
+// macro. Example:
+//
+// typedef ::perf::TestBaseWithParam<cv::Size> FooTest;
+//
+// PERF_TEST_P(FooTest, DoTestingRight, ::testing::Values(::perf::szVGA, ::perf::sz720p)) {
+// cv::Mat b(GetParam(), CV_8U, cv::Scalar(10));
+// cv::Mat a(GetParam(), CV_8U, cv::Scalar(20));
+// cv::Mat c(GetParam(), CV_8U, cv::Scalar(0));
+//
+// declare.in(a, b).out(c).time(0.5);
+//
+// TEST_CYCLE() cv::add(a, b, c);
+//
+// SANITY_CHECK(c);
+// }
+#define PERF_TEST_P(fixture, name, params) \
+ class fixture##_##name : public fixture {\
+ public:\
+ fixture##_##name() {}\
+ protected:\
+ virtual void PerfTestBody();\
+ };\
+ TEST_P(fixture##_##name, name /*perf*/){ RunPerfTestBody(); }\
+ INSTANTIATE_TEST_CASE_P(/*none*/, fixture##_##name, params);\
+ void fixture##_##name::PerfTestBody()
+
+#ifndef __CV_TEST_EXEC_ARGS
+#if defined(_MSC_VER) && (_MSC_VER <= 1400)
+#define __CV_TEST_EXEC_ARGS(...) \
+ while (++argc >= (--argc,-1)) {__VA_ARGS__; break;} /*this ugly construction is needed for VS 2005*/
+#else
+#define __CV_TEST_EXEC_ARGS(...) \
+ __VA_ARGS__;
+#endif
+#endif
+
+#define CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, ...) \
+ ::perf::Regression::Init(#modulename); \
+ ::perf::TestBase::Init(std::vector<std::string>(impls, impls + sizeof impls / sizeof *impls), \
+ argc, argv); \
+ ::testing::InitGoogleTest(&argc, argv); \
+ cvtest::printVersionInfo(); \
+ ::testing::Test::RecordProperty("cv_module_name", #modulename); \
+ ::perf::TestBase::RecordRunParameters(); \
+ __CV_TEST_EXEC_ARGS(__VA_ARGS__) \
+ return RUN_ALL_TESTS();
+
+// impls must be an array, not a pointer; "plain" should always be one of the implementations
+#define CV_PERF_TEST_MAIN_WITH_IMPLS(modulename, impls, ...) \
+int main(int argc, char **argv)\
+{\
+ CV_PERF_TEST_MAIN_INTERNALS(modulename, impls, __VA_ARGS__)\
+}
+
+#define CV_PERF_TEST_MAIN(modulename, ...) \
+int main(int argc, char **argv)\
+{\
+ const char * plain_only[] = { "plain" };\
+ CV_PERF_TEST_MAIN_INTERNALS(modulename, plain_only, __VA_ARGS__)\
+}
+
+#define TEST_CYCLE_N(n) for(declare.iterations(n); startTimer(), next(); stopTimer())
+#define TEST_CYCLE() for(; startTimer(), next(); stopTimer())
+#define TEST_CYCLE_MULTIRUN(runsNum) for(declare.runs(runsNum); startTimer(), next(); stopTimer()) for(int r = 0; r < runsNum; ++r)
+
+namespace perf
+{
+namespace comparators
+{
+
+template<typename T>
+struct CV_EXPORTS RectLess_
+{
+ bool operator()(const cv::Rect_<T>& r1, const cv::Rect_<T>& r2) const
+ {
+ return r1.x < r2.x
+ || (r1.x == r2.x && r1.y < r2.y)
+ || (r1.x == r2.x && r1.y == r2.y && r1.width < r2.width)
+ || (r1.x == r2.x && r1.y == r2.y && r1.width == r2.width && r1.height < r2.height);
+ }
+};
+
+typedef RectLess_<int> RectLess;
+
+struct CV_EXPORTS KeypointGreater
+{
+ bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
+ {
+ if(kp1.response > kp2.response) return true;
+ if(kp1.response < kp2.response) return false;
+ if(kp1.size > kp2.size) return true;
+ if(kp1.size < kp2.size) return false;
+ if(kp1.octave > kp2.octave) return true;
+ if(kp1.octave < kp2.octave) return false;
+ if(kp1.pt.y < kp2.pt.y) return false;
+ if(kp1.pt.y > kp2.pt.y) return true;
+ return kp1.pt.x < kp2.pt.x;
+ }
+};
+
+} //namespace comparators
+
+void CV_EXPORTS sort(std::vector<cv::KeyPoint>& pts, cv::InputOutputArray descriptors);
+} //namespace perf
+
+#endif //__OPENCV_TS_PERF_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video.hpp
new file mode 100644
index 00000000..be11ce47
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/video/video.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/background_segm.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/background_segm.hpp
new file mode 100644
index 00000000..d2d068c6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/background_segm.hpp
@@ -0,0 +1,263 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_BACKGROUND_SEGM_HPP__
+#define __OPENCV_BACKGROUND_SEGM_HPP__
+
+#include "opencv2/core/core.hpp"
+#include <list>
+namespace cv
+{
+
+/*!
+ The Base Class for Background/Foreground Segmentation
+
+ The class is only used to define the common interface for
+ the whole family of background/foreground segmentation algorithms.
+*/
+class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
+{
+public:
+ //! the virtual destructor
+ virtual ~BackgroundSubtractor();
+ //! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
+ CV_WRAP_AS(apply) virtual void operator()(InputArray image, OutputArray fgmask,
+ double learningRate=0);
+
+ //! computes a background image
+ virtual void getBackgroundImage(OutputArray backgroundImage) const;
+};
+
+
+/*!
+ Gaussian Mixture-based Background/Foreground Segmentation Algorithm
+
+ The class implements the following algorithm:
+ "An improved adaptive background mixture model for real-time tracking with shadow detection"
+ P. KadewTraKuPong and R. Bowden,
+ Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001."
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+
+*/
+class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
+{
+public:
+ //! the default constructor
+ CV_WRAP BackgroundSubtractorMOG();
+ //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
+ CV_WRAP BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma=0);
+ //! the destructor
+ virtual ~BackgroundSubtractorMOG();
+ //! the update operator
+ virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=0);
+
+ //! re-initialization method
+ virtual void initialize(Size frameSize, int frameType);
+
+ virtual AlgorithmInfo* info() const;
+
+protected:
+ Size frameSize;
+ int frameType;
+ Mat bgmodel;
+ int nframes;
+ int history;
+ int nmixtures;
+ double varThreshold;
+ double backgroundRatio;
+ double noiseSigma;
+};
+
+
+/*!
+ The class implements the following algorithm:
+ "Improved adaptive Gausian mixture model for background subtraction"
+ Z.Zivkovic
+ International Conference Pattern Recognition, UK, August, 2004.
+ http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
+*/
+class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor
+{
+public:
+ //! the default constructor
+ CV_WRAP BackgroundSubtractorMOG2();
+ //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
+ CV_WRAP BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=true);
+ //! the destructor
+ virtual ~BackgroundSubtractorMOG2();
+ //! the update operator
+ virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1);
+
+ //! computes a background image which are the mean of all background gaussians
+ virtual void getBackgroundImage(OutputArray backgroundImage) const;
+
+ //! re-initialization method
+ virtual void initialize(Size frameSize, int frameType);
+
+ virtual AlgorithmInfo* info() const;
+
+protected:
+ Size frameSize;
+ int frameType;
+ Mat bgmodel;
+ Mat bgmodelUsedModes;//keep track of number of modes per pixel
+ int nframes;
+ int history;
+ int nmixtures;
+ //! here it is the maximum allowed number of mixture components.
+ //! Actual number is determined dynamically per pixel
+ double varThreshold;
+ // threshold on the squared Mahalanobis distance to decide if it is well described
+ // by the background model or not. Related to Cthr from the paper.
+ // This does not influence the update of the background. A typical value could be 4 sigma
+ // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+
+ /////////////////////////
+ // less important parameters - things you might change but be careful
+ ////////////////////////
+ float backgroundRatio;
+ // corresponds to fTB=1-cf from the paper
+ // TB - threshold when the component becomes significant enough to be included into
+ // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
+ // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+ // it is considered foreground
+ // float noiseSigma;
+ float varThresholdGen;
+ //corresponds to Tg - threshold on the squared Mahalan. dist. to decide
+ //when a sample is close to the existing components. If it is not close
+ //to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
+ //Smaller Tg leads to more generated components and higher Tg might
+ //lead to a small number of components but they can grow too large
+ float fVarInit;
+ float fVarMin;
+ float fVarMax;
+ //initial variance for the newly generated components.
+ //It will influence the speed of adaptation. A good guess should be made.
+ //A simple way is to estimate the typical standard deviation from the images.
+ //I used here 10 as a reasonable value
+ // min and max can be used to further control the variance
+ float fCT;//CT - complexity reduction prior
+ //this is related to the number of samples needed to accept that a component
+ //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+ //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+ //shadow detection parameters
+ bool bShadowDetection;//default 1 - do shadow detection
+ unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value
+ float fTau;
+ // Tau - shadow threshold. The shadow is detected if the pixel is darker
+ //version of the background. Tau is a threshold on how much darker the shadow can be.
+ //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
+ //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
+};
+
+/**
+ * Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
+ * images of the same size, where 255 indicates Foreground and 0 represents Background.
+ * This class implements an algorithm described in "Visual Tracking of Human Visitors under
+ * Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
+ * A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
+ */
+class CV_EXPORTS BackgroundSubtractorGMG: public cv::BackgroundSubtractor
+{
+public:
+ BackgroundSubtractorGMG();
+ virtual ~BackgroundSubtractorGMG();
+ virtual AlgorithmInfo* info() const;
+
+ /**
+ * Validate parameters and set up data structures for appropriate image size.
+ * Must call before running on data.
+ * @param frameSize input frame size
+ * @param min minimum value taken on by pixels in image sequence. Usually 0
+ * @param max maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
+ */
+ void initialize(cv::Size frameSize, double min, double max);
+
+ /**
+ * Performs single-frame background subtraction and builds up a statistical background image
+ * model.
+ * @param image Input image
+ * @param fgmask Output mask image representing foreground and background pixels
+ * @param learningRate Determines how quickly features are "forgotten" from histograms
+ */
+ virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1.0);
+
+ /**
+ * Releases all inner buffers.
+ */
+ void release();
+
+ //! Total number of distinct colors to maintain in histogram.
+ int maxFeatures;
+ //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
+ double learningRate;
+ //! Number of frames of video to use to initialize histograms.
+ int numInitializationFrames;
+ //! Number of discrete levels in each channel to be used in histograms.
+ int quantizationLevels;
+ //! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
+ double backgroundPrior;
+ //! Value above which pixel is determined to be FG.
+ double decisionThreshold;
+ //! Smoothing radius, in pixels, for cleaning up FG image.
+ int smoothingRadius;
+ //! Perform background model update
+ bool updateBackgroundModel;
+
+private:
+ double maxVal_;
+ double minVal_;
+
+ cv::Size frameSize_;
+ int frameNum_;
+
+ cv::Mat_<int> nfeatures_;
+ cv::Mat_<unsigned int> colors_;
+ cv::Mat_<float> weights_;
+
+ cv::Mat buf_;
+};
+
+}
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/tracking.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/tracking.hpp
new file mode 100644
index 00000000..f09be806
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/tracking.hpp
@@ -0,0 +1,373 @@
+/*! \file tracking.hpp
+ \brief The Object and Feature Tracking
+ */
+
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_TRACKING_HPP__
+#define __OPENCV_TRACKING_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/****************************************************************************************\
+* Motion Analysis *
+\****************************************************************************************/
+
+/************************************ optical flow ***************************************/
+
+#define CV_LKFLOW_PYR_A_READY 1
+#define CV_LKFLOW_PYR_B_READY 2
+#define CV_LKFLOW_INITIAL_GUESSES 4
+#define CV_LKFLOW_GET_MIN_EIGENVALS 8
+
+/* It is Lucas & Kanade method, modified to use pyramids.
+ Also it does several iterations to get optical flow for
+ every point at every pyramid level.
+ Calculates optical flow between two images for certain set of points (i.e.
+ it is a "sparse" optical flow, which is opposite to the previous 3 methods) */
+CVAPI(void) cvCalcOpticalFlowPyrLK( const CvArr* prev, const CvArr* curr,
+ CvArr* prev_pyr, CvArr* curr_pyr,
+ const CvPoint2D32f* prev_features,
+ CvPoint2D32f* curr_features,
+ int count,
+ CvSize win_size,
+ int level,
+ char* status,
+ float* track_error,
+ CvTermCriteria criteria,
+ int flags );
+
+
+/* Modification of a previous sparse optical flow algorithm to calculate
+ affine flow */
+CVAPI(void) cvCalcAffineFlowPyrLK( const CvArr* prev, const CvArr* curr,
+ CvArr* prev_pyr, CvArr* curr_pyr,
+ const CvPoint2D32f* prev_features,
+ CvPoint2D32f* curr_features,
+ float* matrices, int count,
+ CvSize win_size, int level,
+ char* status, float* track_error,
+ CvTermCriteria criteria, int flags );
+
+/* Estimate rigid transformation between 2 images or 2 point sets */
+CVAPI(int) cvEstimateRigidTransform( const CvArr* A, const CvArr* B,
+ CvMat* M, int full_affine );
+
+/* Estimate optical flow for each pixel using the two-frame G. Farneback algorithm */
+CVAPI(void) cvCalcOpticalFlowFarneback( const CvArr* prev, const CvArr* next,
+ CvArr* flow, double pyr_scale, int levels,
+ int winsize, int iterations, int poly_n,
+ double poly_sigma, int flags );
+
+/********************************* motion templates *************************************/
+
+/****************************************************************************************\
+* All the motion template functions work only with single channel images. *
+* Silhouette image must have depth IPL_DEPTH_8U or IPL_DEPTH_8S *
+* Motion history image must have depth IPL_DEPTH_32F, *
+* Gradient mask - IPL_DEPTH_8U or IPL_DEPTH_8S, *
+* Motion orientation image - IPL_DEPTH_32F *
+* Segmentation mask - IPL_DEPTH_32F *
+* All the angles are in degrees, all the times are in milliseconds *
+\****************************************************************************************/
+
+/* Updates motion history image given motion silhouette */
+CVAPI(void) cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi,
+ double timestamp, double duration );
+
+/* Calculates gradient of the motion history image and fills
+ a mask indicating where the gradient is valid */
+CVAPI(void) cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation,
+ double delta1, double delta2,
+ int aperture_size CV_DEFAULT(3));
+
+/* Calculates average motion direction within a selected motion region
+ (region can be selected by setting ROIs and/or by composing a valid gradient mask
+ with the region mask) */
+CVAPI(double) cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask,
+ const CvArr* mhi, double timestamp,
+ double duration );
+
+/* Splits a motion history image into a few parts corresponding to separate independent motions
+ (e.g. left hand, right hand) */
+CVAPI(CvSeq*) cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask,
+ CvMemStorage* storage,
+ double timestamp, double seg_thresh );
+
+/****************************************************************************************\
+* Tracking *
+\****************************************************************************************/
+
+/* Implements CAMSHIFT algorithm - determines object position, size and orientation
+ from the object histogram back project (extension of meanshift) */
+CVAPI(int) cvCamShift( const CvArr* prob_image, CvRect window,
+ CvTermCriteria criteria, CvConnectedComp* comp,
+ CvBox2D* box CV_DEFAULT(NULL) );
+
+/* Implements MeanShift algorithm - determines object position
+ from the object histogram back project */
+CVAPI(int) cvMeanShift( const CvArr* prob_image, CvRect window,
+ CvTermCriteria criteria, CvConnectedComp* comp );
+
+/*
+standard Kalman filter (in G. Welch's and G. Bishop's notation):
+
+ x(k)=A*x(k-1)+B*u(k)+w(k) p(w)~N(0,Q)
+ z(k)=H*x(k)+v(k), p(v)~N(0,R)
+*/
+typedef struct CvKalman
+{
+ int MP; /* number of measurement vector dimensions */
+ int DP; /* number of state vector dimensions */
+ int CP; /* number of control vector dimensions */
+
+ /* backward compatibility fields */
+#if 1
+ float* PosterState; /* =state_pre->data.fl */
+ float* PriorState; /* =state_post->data.fl */
+ float* DynamMatr; /* =transition_matrix->data.fl */
+ float* MeasurementMatr; /* =measurement_matrix->data.fl */
+ float* MNCovariance; /* =measurement_noise_cov->data.fl */
+ float* PNCovariance; /* =process_noise_cov->data.fl */
+ float* KalmGainMatr; /* =gain->data.fl */
+ float* PriorErrorCovariance;/* =error_cov_pre->data.fl */
+ float* PosterErrorCovariance;/* =error_cov_post->data.fl */
+ float* Temp1; /* temp1->data.fl */
+ float* Temp2; /* temp2->data.fl */
+#endif
+
+ CvMat* state_pre; /* predicted state (x'(k)):
+ x(k)=A*x(k-1)+B*u(k) */
+ CvMat* state_post; /* corrected state (x(k)):
+ x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) */
+ CvMat* transition_matrix; /* state transition matrix (A) */
+ CvMat* control_matrix; /* control matrix (B)
+ (it is not used if there is no control)*/
+ CvMat* measurement_matrix; /* measurement matrix (H) */
+ CvMat* process_noise_cov; /* process noise covariance matrix (Q) */
+ CvMat* measurement_noise_cov; /* measurement noise covariance matrix (R) */
+ CvMat* error_cov_pre; /* priori error estimate covariance matrix (P'(k)):
+ P'(k)=A*P(k-1)*At + Q)*/
+ CvMat* gain; /* Kalman gain matrix (K(k)):
+ K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)*/
+ CvMat* error_cov_post; /* posteriori error estimate covariance matrix (P(k)):
+ P(k)=(I-K(k)*H)*P'(k) */
+ CvMat* temp1; /* temporary matrices */
+ CvMat* temp2;
+ CvMat* temp3;
+ CvMat* temp4;
+ CvMat* temp5;
+} CvKalman;
+
+/* Creates Kalman filter and sets A, B, Q, R and state to some initial values */
+CVAPI(CvKalman*) cvCreateKalman( int dynam_params, int measure_params,
+ int control_params CV_DEFAULT(0));
+
+/* Releases Kalman filter state */
+CVAPI(void) cvReleaseKalman( CvKalman** kalman);
+
+/* Updates Kalman filter by time (predicts future state of the system) */
+CVAPI(const CvMat*) cvKalmanPredict( CvKalman* kalman,
+ const CvMat* control CV_DEFAULT(NULL));
+
+/* Updates Kalman filter by measurement
+ (corrects state of the system and internal matrices) */
+CVAPI(const CvMat*) cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement );
+
+#define cvKalmanUpdateByTime cvKalmanPredict
+#define cvKalmanUpdateByMeasurement cvKalmanCorrect
+
+#ifdef __cplusplus
+}
+
+namespace cv
+{
+
+//! updates motion history image using the current silhouette
+CV_EXPORTS_W void updateMotionHistory( InputArray silhouette, InputOutputArray mhi,
+ double timestamp, double duration );
+
+//! computes the motion gradient orientation image from the motion history image
+CV_EXPORTS_W void calcMotionGradient( InputArray mhi, OutputArray mask,
+ OutputArray orientation,
+ double delta1, double delta2,
+ int apertureSize=3 );
+
+//! computes the global orientation of the selected motion history image part
+CV_EXPORTS_W double calcGlobalOrientation( InputArray orientation, InputArray mask,
+ InputArray mhi, double timestamp,
+ double duration );
+
+CV_EXPORTS_W void segmentMotion(InputArray mhi, OutputArray segmask,
+ CV_OUT vector<Rect>& boundingRects,
+ double timestamp, double segThresh);
+
+//! updates the object tracking window using CAMSHIFT algorithm
+CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window,
+ TermCriteria criteria );
+
+//! updates the object tracking window using meanshift algorithm
+CV_EXPORTS_W int meanShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window,
+ TermCriteria criteria );
+
+/*!
+ Kalman filter.
+
+ The class implements standard Kalman filter http://en.wikipedia.org/wiki/Kalman_filter.
+ However, you can modify KalmanFilter::transitionMatrix, KalmanFilter::controlMatrix and
+ KalmanFilter::measurementMatrix to get the extended Kalman filter functionality.
+*/
+class CV_EXPORTS_W KalmanFilter
+{
+public:
+ //! the default constructor
+ CV_WRAP KalmanFilter();
+ //! the full constructor taking the dimensionality of the state, of the measurement and of the control vector
+ CV_WRAP KalmanFilter(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F);
+ //! re-initializes Kalman filter. The previous content is destroyed.
+ void init(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F);
+
+ //! computes predicted state
+ CV_WRAP const Mat& predict(const Mat& control=Mat());
+ //! updates the predicted state from the measurement
+ CV_WRAP const Mat& correct(const Mat& measurement);
+
+ Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k)
+ Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
+ Mat transitionMatrix; //!< state transition matrix (A)
+ Mat controlMatrix; //!< control matrix (B) (not used if there is no control)
+ Mat measurementMatrix; //!< measurement matrix (H)
+ Mat processNoiseCov; //!< process noise covariance matrix (Q)
+ Mat measurementNoiseCov;//!< measurement noise covariance matrix (R)
+ Mat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)*/
+ Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
+ Mat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
+
+ // temporary matrices
+ Mat temp1;
+ Mat temp2;
+ Mat temp3;
+ Mat temp4;
+ Mat temp5;
+};
+
+enum
+{
+ OPTFLOW_USE_INITIAL_FLOW = CV_LKFLOW_INITIAL_GUESSES,
+ OPTFLOW_LK_GET_MIN_EIGENVALS = CV_LKFLOW_GET_MIN_EIGENVALS,
+ OPTFLOW_FARNEBACK_GAUSSIAN = 256
+};
+
+//! constructs a pyramid which can be used as input for calcOpticalFlowPyrLK
+CV_EXPORTS_W int buildOpticalFlowPyramid(InputArray img, OutputArrayOfArrays pyramid,
+ Size winSize, int maxLevel, bool withDerivatives = true,
+ int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT,
+ bool tryReuseInputImage = true);
+
+//! computes sparse optical flow using multi-scale Lucas-Kanade algorithm
+CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
+ InputArray prevPts, CV_OUT InputOutputArray nextPts,
+ OutputArray status, OutputArray err,
+ Size winSize=Size(21,21), int maxLevel=3,
+ TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
+ int flags=0, double minEigThreshold=1e-4);
+
+//! computes dense optical flow using Farneback algorithm
+CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next,
+ CV_OUT InputOutputArray flow, double pyr_scale, int levels, int winsize,
+ int iterations, int poly_n, double poly_sigma, int flags );
+
+//! estimates the best-fit Euclidean, similarity, affine or perspective transformation
+// that maps one 2D point set to another or one image to another.
+CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst,
+ bool fullAffine);
+
+//! computes dense optical flow using Simple Flow algorithm
+CV_EXPORTS_W void calcOpticalFlowSF(Mat& from,
+ Mat& to,
+ Mat& flow,
+ int layers,
+ int averaging_block_size,
+ int max_flow);
+
+CV_EXPORTS_W void calcOpticalFlowSF(Mat& from,
+ Mat& to,
+ Mat& flow,
+ int layers,
+ int averaging_block_size,
+ int max_flow,
+ double sigma_dist,
+ double sigma_color,
+ int postprocess_window,
+ double sigma_dist_fix,
+ double sigma_color_fix,
+ double occ_thr,
+ int upscale_averaging_radius,
+ double upscale_sigma_dist,
+ double upscale_sigma_color,
+ double speed_up_thr);
+
+class CV_EXPORTS DenseOpticalFlow : public Algorithm
+{
+public:
+ virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow) = 0;
+ virtual void collectGarbage() = 0;
+};
+
+// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
+//
+// see reference:
+// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
+// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
+CV_EXPORTS Ptr<DenseOpticalFlow> createOptFlow_DualTVL1();
+
+}
+
+#endif
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/video.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/video.hpp
new file mode 100644
index 00000000..1dd96f54
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/video/video.hpp
@@ -0,0 +1,58 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEO_HPP__
+#define __OPENCV_VIDEO_HPP__
+
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/video/background_segm.hpp"
+
+#ifdef __cplusplus
+namespace cv
+{
+
+CV_EXPORTS bool initModule_video(void);
+
+}
+#endif
+
+#endif //__OPENCV_VIDEO_HPP__
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab.hpp
new file mode 100644
index 00000000..4334d5c5
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab.hpp
@@ -0,0 +1,43 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/videostab/videostab.hpp"
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/deblurring.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/deblurring.hpp
new file mode 100644
index 00000000..a61f9ce6
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/deblurring.hpp
@@ -0,0 +1,110 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_DEBLURRING_HPP__
+#define __OPENCV_VIDEOSTAB_DEBLURRING_HPP__
+
+#include <vector>
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+CV_EXPORTS float calcBlurriness(const Mat &frame);
+
+class CV_EXPORTS DeblurerBase
+{
+public:
+ DeblurerBase() : radius_(0), frames_(0), motions_(0) {}
+
+ virtual ~DeblurerBase() {}
+
+ virtual void setRadius(int val) { radius_ = val; }
+ virtual int radius() const { return radius_; }
+
+ virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }
+ virtual const std::vector<Mat>& frames() const { return *frames_; }
+
+ virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }
+ virtual const std::vector<Mat>& motions() const { return *motions_; }
+
+ virtual void setBlurrinessRates(const std::vector<float> &val) { blurrinessRates_ = &val; }
+ virtual const std::vector<float>& blurrinessRates() const { return *blurrinessRates_; }
+
+ virtual void update() {}
+
+ virtual void deblur(int idx, Mat &frame) = 0;
+
+protected:
+ int radius_;
+ const std::vector<Mat> *frames_;
+ const std::vector<Mat> *motions_;
+ const std::vector<float> *blurrinessRates_;
+};
+
+class CV_EXPORTS NullDeblurer : public DeblurerBase
+{
+public:
+ virtual void deblur(int /*idx*/, Mat &/*frame*/) {}
+};
+
+class CV_EXPORTS WeightingDeblurer : public DeblurerBase
+{
+public:
+ WeightingDeblurer();
+
+ void setSensitivity(float val) { sensitivity_ = val; }
+ float sensitivity() const { return sensitivity_; }
+
+ virtual void deblur(int idx, Mat &frame);
+
+private:
+ float sensitivity_;
+ Mat_<float> bSum_, gSum_, rSum_, wSum_;
+};
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching.hpp
new file mode 100644
index 00000000..23c5df3a
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching.hpp
@@ -0,0 +1,103 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__
+#define __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__
+
+#include <cmath>
+#include <queue>
+#include <algorithm>
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+// See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf
+class CV_EXPORTS FastMarchingMethod
+{
+public:
+ FastMarchingMethod() : inf_(1e6f) {}
+
+ template <typename Inpaint>
+ Inpaint run(const Mat &mask, Inpaint inpaint);
+
+ Mat distanceMap() const { return dist_; }
+
+private:
+ enum { INSIDE = 0, BAND = 1, KNOWN = 255 };
+
+ struct DXY
+ {
+ float dist;
+ int x, y;
+
+ DXY() : dist(0), x(0), y(0) {}
+ DXY(float _dist, int _x, int _y) : dist(_dist), x(_x), y(_y) {}
+ bool operator <(const DXY &dxy) const { return dist < dxy.dist; }
+ };
+
+ float solve(int x1, int y1, int x2, int y2) const;
+ int& indexOf(const DXY &dxy) { return index_(dxy.y, dxy.x); }
+
+ void heapUp(int idx);
+ void heapDown(int idx);
+ void heapAdd(const DXY &dxy);
+ void heapRemoveMin();
+
+ float inf_;
+
+ cv::Mat_<uchar> flag_; // flag map
+ cv::Mat_<float> dist_; // distance map
+
+ cv::Mat_<int> index_; // index of point in the narrow band
+ std::vector<DXY> narrowBand_; // narrow band heap
+ int size_; // narrow band size
+};
+
+} // namespace videostab
+} // namespace cv
+
+#include "fast_marching_inl.hpp"
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching_inl.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching_inl.hpp
new file mode 100644
index 00000000..dc860c2c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/fast_marching_inl.hpp
@@ -0,0 +1,166 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__
+#define __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__
+
+#include "opencv2/videostab/fast_marching.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+template <typename Inpaint>
+Inpaint FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint)
+{
+ using namespace std;
+ using namespace cv;
+
+ CV_Assert(mask.type() == CV_8U);
+
+ static const int lut[4][2] = {{-1,0}, {0,-1}, {1,0}, {0,1}};
+
+ mask.copyTo(flag_);
+ flag_.create(mask.size());
+ dist_.create(mask.size());
+ index_.create(mask.size());
+ narrowBand_.clear();
+ size_ = 0;
+
+ // init
+ for (int y = 0; y < flag_.rows; ++y)
+ {
+ for (int x = 0; x < flag_.cols; ++x)
+ {
+ if (flag_(y,x) == KNOWN)
+ dist_(y,x) = 0.f;
+ else
+ {
+ int n = 0;
+ int nunknown = 0;
+
+ for (int i = 0; i < 4; ++i)
+ {
+ int xn = x + lut[i][0];
+ int yn = y + lut[i][1];
+
+ if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows)
+ {
+ n++;
+ if (flag_(yn,xn) != KNOWN)
+ nunknown++;
+ }
+ }
+
+ if (n>0 && nunknown == n)
+ {
+ dist_(y,x) = inf_;
+ flag_(y,x) = INSIDE;
+ }
+ else
+ {
+ dist_(y,x) = 0.f;
+ flag_(y,x) = BAND;
+ inpaint(x, y);
+
+ narrowBand_.push_back(DXY(0.f,x,y));
+ index_(y,x) = size_++;
+ }
+ }
+ }
+ }
+
+ // make heap
+ for (int i = size_/2-1; i >= 0; --i)
+ heapDown(i);
+
+ // main cycle
+ while (size_ > 0)
+ {
+ int x = narrowBand_[0].x;
+ int y = narrowBand_[0].y;
+ heapRemoveMin();
+
+ flag_(y,x) = KNOWN;
+ for (int n = 0; n < 4; ++n)
+ {
+ int xn = x + lut[n][0];
+ int yn = y + lut[n][1];
+
+ if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN)
+ {
+ dist_(yn,xn) = min(min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)),
+ min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1)));
+
+ if (flag_(yn,xn) == INSIDE)
+ {
+ flag_(yn,xn) = BAND;
+ inpaint(xn, yn);
+ heapAdd(DXY(dist_(yn,xn),xn,yn));
+ }
+ else
+ {
+ int i = index_(yn,xn);
+ if (dist_(yn,xn) < narrowBand_[i].dist)
+ {
+ narrowBand_[i].dist = dist_(yn,xn);
+ heapUp(i);
+ }
+ // works better if it's commented out
+ /*else if (dist(yn,xn) > narrowBand[i].dist)
+ {
+ narrowBand[i].dist = dist(yn,xn);
+ heapDown(i);
+ }*/
+ }
+ }
+ }
+ }
+
+ return inpaint;
+}
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/frame_source.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/frame_source.hpp
new file mode 100644
index 00000000..c22c0a4c
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/frame_source.hpp
@@ -0,0 +1,91 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__
+#define __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__
+
+#include <vector>
+#include <string>
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+class CV_EXPORTS IFrameSource
+{
+public:
+ virtual ~IFrameSource() {}
+ virtual void reset() = 0;
+ virtual Mat nextFrame() = 0;
+};
+
+class CV_EXPORTS NullFrameSource : public IFrameSource
+{
+public:
+ virtual void reset() {}
+ virtual Mat nextFrame() { return Mat(); }
+};
+
+class CV_EXPORTS VideoFileSource : public IFrameSource
+{
+public:
+ VideoFileSource(const std::string &path, bool volatileFrame = false);
+
+ virtual void reset();
+ virtual Mat nextFrame();
+
+ int frameCount() { return static_cast<int>(reader_.get(CV_CAP_PROP_FRAME_COUNT)); }
+ double fps() { return reader_.get(CV_CAP_PROP_FPS); }
+
+private:
+ std::string path_;
+ bool volatileFrame_;
+ VideoCapture reader_;
+};
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/global_motion.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/global_motion.hpp
new file mode 100644
index 00000000..f5f34b92
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/global_motion.hpp
@@ -0,0 +1,141 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__
+#define __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__
+
+#include <vector>
+#include "opencv2/core/core.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "opencv2/videostab/optical_flow.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+enum MotionModel
+{
+ TRANSLATION = 0,
+ TRANSLATION_AND_SCALE = 1,
+ LINEAR_SIMILARITY = 2,
+ AFFINE = 3
+};
+
+CV_EXPORTS Mat estimateGlobalMotionLeastSquares(
+ const std::vector<Point2f> &points0, const std::vector<Point2f> &points1,
+ int model = AFFINE, float *rmse = 0);
+
+struct CV_EXPORTS RansacParams
+{
+ int size; // subset size
+ float thresh; // max error to classify as inlier
+ float eps; // max outliers ratio
+ float prob; // probability of success
+
+ RansacParams(int _size, float _thresh, float _eps, float _prob)
+ : size(_size), thresh(_thresh), eps(_eps), prob(_prob) {}
+
+ static RansacParams translationMotionStd() { return RansacParams(2, 0.5f, 0.5f, 0.99f); }
+ static RansacParams translationAndScale2dMotionStd() { return RansacParams(3, 0.5f, 0.5f, 0.99f); }
+ static RansacParams linearSimilarityMotionStd() { return RansacParams(4, 0.5f, 0.5f, 0.99f); }
+ static RansacParams affine2dMotionStd() { return RansacParams(6, 0.5f, 0.5f, 0.99f); }
+};
+
+CV_EXPORTS Mat estimateGlobalMotionRobust(
+ const std::vector<Point2f> &points0, const std::vector<Point2f> &points1,
+ int model = AFFINE, const RansacParams &params = RansacParams::affine2dMotionStd(),
+ float *rmse = 0, int *ninliers = 0);
+
+class CV_EXPORTS IGlobalMotionEstimator
+{
+public:
+ virtual ~IGlobalMotionEstimator() {}
+ virtual Mat estimate(const Mat &frame0, const Mat &frame1) = 0;
+};
+
+class CV_EXPORTS PyrLkRobustMotionEstimator : public IGlobalMotionEstimator
+{
+public:
+ PyrLkRobustMotionEstimator();
+
+ void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
+ Ptr<FeatureDetector> detector() const { return detector_; }
+
+ void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
+ Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
+
+ void setMotionModel(MotionModel val) { motionModel_ = val; }
+ MotionModel motionModel() const { return motionModel_; }
+
+ void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
+ RansacParams ransacParams() const { return ransacParams_; }
+
+ void setMaxRmse(float val) { maxRmse_ = val; }
+ float maxRmse() const { return maxRmse_; }
+
+ void setMinInlierRatio(float val) { minInlierRatio_ = val; }
+ float minInlierRatio() const { return minInlierRatio_; }
+
+ virtual Mat estimate(const Mat &frame0, const Mat &frame1);
+
+private:
+ Ptr<FeatureDetector> detector_;
+ Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
+ MotionModel motionModel_;
+ RansacParams ransacParams_;
+ std::vector<uchar> status_;
+ std::vector<KeyPoint> keypointsPrev_;
+ std::vector<Point2f> pointsPrev_, points_;
+ std::vector<Point2f> pointsPrevGood_, pointsGood_;
+ float maxRmse_;
+ float minInlierRatio_;
+};
+
+CV_EXPORTS Mat getMotion(int from, int to, const Mat *motions, int size);
+
+CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/inpainting.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/inpainting.hpp
new file mode 100644
index 00000000..8df60f7e
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/inpainting.hpp
@@ -0,0 +1,200 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_INPAINTINT_HPP__
+#define __OPENCV_VIDEOSTAB_INPAINTINT_HPP__
+
+#include <vector>
+#include "opencv2/core/core.hpp"
+#include "opencv2/videostab/optical_flow.hpp"
+#include "opencv2/videostab/fast_marching.hpp"
+#include "opencv2/photo/photo.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+class CV_EXPORTS InpainterBase
+{
+public:
+ InpainterBase()
+ : radius_(0), frames_(0), motions_(0),
+ stabilizedFrames_(0), stabilizationMotions_(0) {}
+
+ virtual ~InpainterBase() {}
+
+ virtual void setRadius(int val) { radius_ = val; }
+ virtual int radius() const { return radius_; }
+
+ virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }
+ virtual const std::vector<Mat>& frames() const { return *frames_; }
+
+ virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }
+ virtual const std::vector<Mat>& motions() const { return *motions_; }
+
+ virtual void setStabilizedFrames(const std::vector<Mat> &val) { stabilizedFrames_ = &val; }
+ virtual const std::vector<Mat>& stabilizedFrames() const { return *stabilizedFrames_; }
+
+ virtual void setStabilizationMotions(const std::vector<Mat> &val) { stabilizationMotions_ = &val; }
+ virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }
+
+ virtual void update() {}
+
+ virtual void inpaint(int idx, Mat &frame, Mat &mask) = 0;
+
+protected:
+ int radius_;
+ const std::vector<Mat> *frames_;
+ const std::vector<Mat> *motions_;
+ const std::vector<Mat> *stabilizedFrames_;
+ const std::vector<Mat> *stabilizationMotions_;
+};
+
+class CV_EXPORTS NullInpainter : public InpainterBase
+{
+public:
+ virtual void inpaint(int /*idx*/, Mat &/*frame*/, Mat &/*mask*/) {}
+};
+
+class CV_EXPORTS InpaintingPipeline : public InpainterBase
+{
+public:
+ void pushBack(Ptr<InpainterBase> inpainter) { inpainters_.push_back(inpainter); }
+ bool empty() const { return inpainters_.empty(); }
+
+ virtual void setRadius(int val);
+ virtual void setFrames(const std::vector<Mat> &val);
+ virtual void setMotions(const std::vector<Mat> &val);
+ virtual void setStabilizedFrames(const std::vector<Mat> &val);
+ virtual void setStabilizationMotions(const std::vector<Mat> &val);
+
+ virtual void update();
+
+ virtual void inpaint(int idx, Mat &frame, Mat &mask);
+
+private:
+ std::vector<Ptr<InpainterBase> > inpainters_;
+};
+
+class CV_EXPORTS ConsistentMosaicInpainter : public InpainterBase
+{
+public:
+ ConsistentMosaicInpainter();
+
+ void setStdevThresh(float val) { stdevThresh_ = val; }
+ float stdevThresh() const { return stdevThresh_; }
+
+ virtual void inpaint(int idx, Mat &frame, Mat &mask);
+
+private:
+ float stdevThresh_;
+};
+
+class CV_EXPORTS MotionInpainter : public InpainterBase
+{
+public:
+ MotionInpainter();
+
+ void setOptFlowEstimator(Ptr<IDenseOptFlowEstimator> val) { optFlowEstimator_ = val; }
+ Ptr<IDenseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
+
+ void setFlowErrorThreshold(float val) { flowErrorThreshold_ = val; }
+ float flowErrorThreshold() const { return flowErrorThreshold_; }
+
+ void setDistThreshold(float val) { distThresh_ = val; }
+ float distThresh() const { return distThresh_; }
+
+ void setBorderMode(int val) { borderMode_ = val; }
+ int borderMode() const { return borderMode_; }
+
+ virtual void inpaint(int idx, Mat &frame, Mat &mask);
+
+private:
+ FastMarchingMethod fmm_;
+ Ptr<IDenseOptFlowEstimator> optFlowEstimator_;
+ float flowErrorThreshold_;
+ float distThresh_;
+ int borderMode_;
+
+ Mat frame1_, transformedFrame1_;
+ Mat_<uchar> grayFrame_, transformedGrayFrame1_;
+ Mat_<uchar> mask1_, transformedMask1_;
+ Mat_<float> flowX_, flowY_, flowErrors_;
+ Mat_<uchar> flowMask_;
+};
+
+class CV_EXPORTS ColorAverageInpainter : public InpainterBase
+{
+public:
+ virtual void inpaint(int idx, Mat &frame, Mat &mask);
+
+private:
+ FastMarchingMethod fmm_;
+};
+
+class CV_EXPORTS ColorInpainter : public InpainterBase
+{
+public:
+ ColorInpainter(int method = INPAINT_TELEA, double _radius = 2.)
+ : method_(method), radius_(_radius) {}
+
+ virtual void inpaint(int idx, Mat &frame, Mat &mask);
+
+private:
+ int method_;
+ double radius_;
+ Mat invMask_;
+};
+
+CV_EXPORTS void calcFlowMask(
+ const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError,
+ const Mat &mask0, const Mat &mask1, Mat &flowMask);
+
+CV_EXPORTS void completeFrameAccordingToFlow(
+ const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1,
+ float distThresh, Mat& frame0, Mat &mask0);
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/log.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/log.hpp
new file mode 100644
index 00000000..ce6fadf8
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/log.hpp
@@ -0,0 +1,75 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_LOG_HPP__
+#define __OPENCV_VIDEOSTAB_LOG_HPP__
+
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+class CV_EXPORTS ILog
+{
+public:
+ virtual ~ILog() {}
+ virtual void print(const char *format, ...) = 0;
+};
+
+class CV_EXPORTS NullLog : public ILog
+{
+public:
+ virtual void print(const char * /*format*/, ...) {}
+};
+
+class CV_EXPORTS LogToStdout : public ILog
+{
+public:
+ virtual void print(const char *format, ...);
+};
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/motion_stabilizing.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/motion_stabilizing.hpp
new file mode 100644
index 00000000..de05ad25
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/motion_stabilizing.hpp
@@ -0,0 +1,106 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP__
+#define __OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP__
+
+#include <vector>
+#include "opencv2/core/core.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+class CV_EXPORTS IMotionStabilizer
+{
+public:
+ virtual void stabilize(const Mat *motions, int size, Mat *stabilizationMotions) const = 0;
+
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~IMotionStabilizer() {}
+#endif
+};
+
+class CV_EXPORTS MotionFilterBase : public IMotionStabilizer
+{
+public:
+ MotionFilterBase() : radius_(0) {}
+ virtual ~MotionFilterBase() {}
+
+ virtual void setRadius(int val) { radius_ = val; }
+ virtual int radius() const { return radius_; }
+
+ virtual void update() {}
+
+ virtual Mat stabilize(int index, const Mat *motions, int size) const = 0;
+ virtual void stabilize(const Mat *motions, int size, Mat *stabilizationMotions) const;
+
+protected:
+ int radius_;
+};
+
+class CV_EXPORTS GaussianMotionFilter : public MotionFilterBase
+{
+public:
+ GaussianMotionFilter() : stdev_(-1.f) {}
+
+ void setStdev(float val) { stdev_ = val; }
+ float stdev() const { return stdev_; }
+
+ virtual void update();
+
+ virtual Mat stabilize(int index, const Mat *motions, int size) const;
+
+private:
+ float stdev_;
+ std::vector<float> weight_;
+};
+
+CV_EXPORTS Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio);
+
+CV_EXPORTS float estimateOptimalTrimRatio(const Mat &M, Size size);
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/optical_flow.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/optical_flow.hpp
new file mode 100644
index 00000000..2c1742fc
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/optical_flow.hpp
@@ -0,0 +1,120 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__
+#define __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/opencv_modules.hpp"
+
+#if defined(HAVE_OPENCV_GPU) && !defined(ANDROID)
+# include "opencv2/gpu/gpu.hpp"
+#endif
+
+namespace cv
+{
+namespace videostab
+{
+
+class CV_EXPORTS ISparseOptFlowEstimator
+{
+public:
+ virtual ~ISparseOptFlowEstimator() {}
+ virtual void run(
+ InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
+ OutputArray status, OutputArray errors) = 0;
+};
+
+class CV_EXPORTS IDenseOptFlowEstimator
+{
+public:
+ virtual ~IDenseOptFlowEstimator() {}
+ virtual void run(
+ InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
+ OutputArray errors) = 0;
+};
+
+class CV_EXPORTS PyrLkOptFlowEstimatorBase
+{
+public:
+ PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); }
+
+ void setWinSize(Size val) { winSize_ = val; }
+ Size winSize() const { return winSize_; }
+
+ void setMaxLevel(int val) { maxLevel_ = val; }
+ int maxLevel() const { return maxLevel_; }
+
+protected:
+ Size winSize_;
+ int maxLevel_;
+};
+
+class CV_EXPORTS SparsePyrLkOptFlowEstimator
+ : public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator
+{
+public:
+ virtual void run(
+ InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
+ OutputArray status, OutputArray errors);
+};
+
+#if defined(HAVE_OPENCV_GPU) && !defined(ANDROID)
+class CV_EXPORTS DensePyrLkOptFlowEstimatorGpu
+ : public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator
+{
+public:
+ DensePyrLkOptFlowEstimatorGpu();
+
+ virtual void run(
+ InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
+ OutputArray errors);
+private:
+ gpu::PyrLKOpticalFlow optFlowEstimator_;
+ gpu::GpuMat frame0_, frame1_, flowX_, flowY_, errors_;
+};
+#endif
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/stabilizer.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/stabilizer.hpp
new file mode 100644
index 00000000..d1d53887
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/stabilizer.hpp
@@ -0,0 +1,187 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_STABILIZER_HPP__
+#define __OPENCV_VIDEOSTAB_STABILIZER_HPP__
+
+#include <vector>
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/videostab/global_motion.hpp"
+#include "opencv2/videostab/motion_stabilizing.hpp"
+#include "opencv2/videostab/frame_source.hpp"
+#include "opencv2/videostab/log.hpp"
+#include "opencv2/videostab/inpainting.hpp"
+#include "opencv2/videostab/deblurring.hpp"
+
+namespace cv
+{
+namespace videostab
+{
+
+class CV_EXPORTS StabilizerBase
+{
+public:
+ virtual ~StabilizerBase() {}
+
+ void setLog(Ptr<ILog> _log) { log_ = _log; }
+ Ptr<ILog> log() const { return log_; }
+
+ void setRadius(int val) { radius_ = val; }
+ int radius() const { return radius_; }
+
+ void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; }
+ Ptr<IFrameSource> frameSource() const { return frameSource_; }
+
+ void setMotionEstimator(Ptr<IGlobalMotionEstimator> val) { motionEstimator_ = val; }
+ Ptr<IGlobalMotionEstimator> motionEstimator() const { return motionEstimator_; }
+
+ void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; }
+ Ptr<DeblurerBase> deblurrer() const { return deblurer_; }
+
+ void setTrimRatio(float val) { trimRatio_ = val; }
+ float trimRatio() const { return trimRatio_; }
+
+ void setCorrectionForInclusion(bool val) { doCorrectionForInclusion_ = val; }
+ bool doCorrectionForInclusion() const { return doCorrectionForInclusion_; }
+
+ void setBorderMode(int val) { borderMode_ = val; }
+ int borderMode() const { return borderMode_; }
+
+ void setInpainter(Ptr<InpainterBase> val) { inpainter_ = val; }
+ Ptr<InpainterBase> inpainter() const { return inpainter_; }
+
+protected:
+ StabilizerBase();
+
+ void setUp(int cacheSize, const Mat &frame);
+ Mat nextStabilizedFrame();
+ bool doOneIteration();
+ void stabilizeFrame(const Mat &stabilizationMotion);
+
+ virtual void setUp(Mat &firstFrame) = 0;
+ virtual void stabilizeFrame() = 0;
+ virtual void estimateMotion() = 0;
+
+ Ptr<ILog> log_;
+ Ptr<IFrameSource> frameSource_;
+ Ptr<IGlobalMotionEstimator> motionEstimator_;
+ Ptr<DeblurerBase> deblurer_;
+ Ptr<InpainterBase> inpainter_;
+ int radius_;
+ float trimRatio_;
+ bool doCorrectionForInclusion_;
+ int borderMode_;
+
+ Size frameSize_;
+ Mat frameMask_;
+ int curPos_;
+ int curStabilizedPos_;
+ bool doDeblurring_;
+ Mat preProcessedFrame_;
+ bool doInpainting_;
+ Mat inpaintingMask_;
+ std::vector<Mat> frames_;
+ std::vector<Mat> motions_; // motions_[i] is the motion from i-th to i+1-th frame
+ std::vector<float> blurrinessRates_;
+ std::vector<Mat> stabilizedFrames_;
+ std::vector<Mat> stabilizedMasks_;
+ std::vector<Mat> stabilizationMotions_;
+};
+
+class CV_EXPORTS OnePassStabilizer : public StabilizerBase, public IFrameSource
+{
+public:
+ OnePassStabilizer();
+
+ void setMotionFilter(Ptr<MotionFilterBase> val) { motionFilter_ = val; }
+ Ptr<MotionFilterBase> motionFilter() const { return motionFilter_; }
+
+ virtual void reset() { resetImpl(); }
+ virtual Mat nextFrame() { return nextStabilizedFrame(); }
+
+private:
+ void resetImpl();
+
+ virtual void setUp(Mat &firstFrame);
+ virtual void estimateMotion();
+ virtual void stabilizeFrame();
+
+ Ptr<MotionFilterBase> motionFilter_;
+};
+
+class CV_EXPORTS TwoPassStabilizer : public StabilizerBase, public IFrameSource
+{
+public:
+ TwoPassStabilizer();
+
+ void setMotionStabilizer(Ptr<IMotionStabilizer> val) { motionStabilizer_ = val; }
+ Ptr<IMotionStabilizer> motionStabilizer() const { return motionStabilizer_; }
+
+ void setEstimateTrimRatio(bool val) { mustEstTrimRatio_ = val; }
+ bool mustEstimateTrimaRatio() const { return mustEstTrimRatio_; }
+
+ virtual void reset() { resetImpl(); }
+ virtual Mat nextFrame();
+
+ // available after pre-pass, before it's empty
+ std::vector<Mat> motions() const;
+
+private:
+ void resetImpl();
+ void runPrePassIfNecessary();
+
+ virtual void setUp(Mat &firstFrame);
+ virtual void estimateMotion() { /* do nothing as motion was estimation in pre-pass */ }
+ virtual void stabilizeFrame();
+
+ Ptr<IMotionStabilizer> motionStabilizer_;
+ bool mustEstTrimRatio_;
+
+ int frameCount_;
+ bool isPrePassDone_;
+};
+
+} // namespace videostab
+} // namespace cv
+
+#endif
diff --git a/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/videostab.hpp b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/videostab.hpp
new file mode 100644
index 00000000..3ea34a89
--- /dev/null
+++ b/2.3-1/src/c/hardware/rasberrypi/libraries/opencv/opencv2/videostab/videostab.hpp
@@ -0,0 +1,48 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_VIDEOSTAB_HPP__
+#define __OPENCV_VIDEOSTAB_HPP__
+
+#include "opencv2/videostab/stabilizer.hpp"
+
+#endif
diff --git a/2.3-1/src/c/imageProcessing/cvcore/imcvCreateImages.c b/2.3-1/src/c/imageProcessing/cvcore/imcvCreateImages.c
index a7eeaec2..4543b724 100644
--- a/2.3-1/src/c/imageProcessing/cvcore/imcvCreateImages.c
+++ b/2.3-1/src/c/imageProcessing/cvcore/imcvCreateImages.c
@@ -19,7 +19,7 @@
IplImage* imcvCreateImages(int width, int height, char *bit_depth, uint8 no_of_ch)
{
CvSize imageSize = cvSize (width,height);
- IplImage *img = NULL;
+ IplImage* img = NULL;
if (strcmp(bit_depth,"IPL_DEPTH_1U") == 0)
img = cvCreateImage(imageSize,IPL_DEPTH_1U,no_of_ch);
else if (strcmp(bit_depth,"IPL_DEPTH_8U") == 0)
diff --git a/2.3-1/src/c/imageProcessing/cvcore/imcvGetImgSizes.c b/2.3-1/src/c/imageProcessing/cvcore/imcvGetImgSizes.c
new file mode 100644
index 00000000..2faa6271
--- /dev/null
+++ b/2.3-1/src/c/imageProcessing/cvcore/imcvGetImgSizes.c
@@ -0,0 +1,32 @@
+/* Copyright (C) 2016 - IIT Bombay - FOSSEE
+
+ This file must be used under the terms of the CeCILL.
+ This source file is licensed as described in the file COPYING, which
+ you should have received as part of this distribution. The terms
+ are also available at
+ http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+ Author: Siddhesh Wani
+ Organization: FOSSEE, IIT Bombay
+ Email: toolbox@scilab.in
+*/
+
+/* Function to convert image object to other color space*/
+
+#include "types.h"
+#include "cvcore.h"
+#include "cvimgproc.h"
+#include <stdio.h>
+
+/* Writes the width and height of 'src' into imgsize[0] and imgsize[1].
+   'imgsize' must point to at least two doubles. On a NULL image the
+   output is left untouched and an error is reported on stderr. */
+void imcvGetImgSizes(IplImage* src, double* imgsize)
+{
+    if(src != NULL)
+    {
+        imgsize[0] = src->width;
+        imgsize[1] = src->height;
+    }
+    else
+        fprintf(stderr, "Error with input image\n");
+}
diff --git a/2.3-1/src/c/imageProcessing/cvimgproc/imcvCvtColors.c b/2.3-1/src/c/imageProcessing/cvimgproc/imcvCvtColors.c
new file mode 100644
index 00000000..bc2a70ec
--- /dev/null
+++ b/2.3-1/src/c/imageProcessing/cvimgproc/imcvCvtColors.c
@@ -0,0 +1,27 @@
+/* Copyright (C) 2016 - IIT Bombay - FOSSEE
+
+ This file must be used under the terms of the CeCILL.
+ This source file is licensed as described in the file COPYING, which
+ you should have received as part of this distribution. The terms
+ are also available at
+ http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+ Author: Siddhesh Wani
+ Organization: FOSSEE, IIT Bombay
+ Email: toolbox@scilab.in
+*/
+
+/* Function to convert image object to other color space*/
+
+#include "types.h"
+#include "cvcore.h"
+#include "cvimgproc.h"
+#include <string.h>
+
+/* Converts 'src' into 'dst' according to the conversion named by 'code'.
+   Only "CV_BGR2GRAY" is currently supported; unknown codes are a no-op.
+   Returns 0. */
+uint8 imcvCvtColors(IplImage* src, IplImage* dst, char* code)
+{
+    if(strcmp(code,"CV_BGR2GRAY") == 0)
+        cvCvtColor(src, dst, CV_BGR2GRAY); /* was CV_RGB2GRAY: wrong channel order */
+    return 0;
+}
diff --git a/2.3-1/src/c/imageProcessing/cvimgproc/imcvThresholds.c b/2.3-1/src/c/imageProcessing/cvimgproc/imcvThresholds.c
new file mode 100644
index 00000000..cd66c52e
--- /dev/null
+++ b/2.3-1/src/c/imageProcessing/cvimgproc/imcvThresholds.c
@@ -0,0 +1,27 @@
+/* Copyright (C) 2016 - IIT Bombay - FOSSEE
+
+ This file must be used under the terms of the CeCILL.
+ This source file is licensed as described in the file COPYING, which
+ you should have received as part of this distribution. The terms
+ are also available at
+ http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+ Author: Siddhesh Wani
+ Organization: FOSSEE, IIT Bombay
+ Email: toolbox@scilab.in
+*/
+
+/* Function to threshold a gray scale image*/
+
+#include "types.h"
+#include "cvcore.h"
+#include "cvimgproc.h"
+#include <string.h>
+
+/* Thresholds the gray-scale image 'src' and returns a newly allocated
+   8-bit single-channel result (caller owns it). Only "CV_THRESH_BINARY"
+   is currently supported; for unknown types the destination is returned
+   unmodified (zero-initialised). */
+IplImage* imcvThresholds(IplImage* src, double threshold, double maxvalue, char* type)
+{
+    IplImage* dst = imcvCreateImages(src->width, src->height, "IPL_DEPTH_8U", 1);
+    if(strcmp(type,"CV_THRESH_BINARY") == 0)
+        cvThreshold(src, dst, threshold, maxvalue, CV_THRESH_BINARY);
+    return dst;
+}
diff --git a/2.3-1/src/c/imageProcessing/includes/cvcore.h b/2.3-1/src/c/imageProcessing/includes/cvcore.h
index 317d99b6..4f04e581 100644
--- a/2.3-1/src/c/imageProcessing/includes/cvcore.h
+++ b/2.3-1/src/c/imageProcessing/includes/cvcore.h
@@ -22,7 +22,7 @@ extern "C" {
#include "opencv2/core/core.hpp"
IplImage* imcvCreateImages(int width, int height, char *bit_depth, uint8 no_of_ch);
-
+void imcvGetImgSizes(IplImage* src, double* imgsize);
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/2.3-1/src/c/imageProcessing/includes/cvhighgui.h b/2.3-1/src/c/imageProcessing/includes/cvhighgui.h
index cae3dd1b..2e959a5d 100644
--- a/2.3-1/src/c/imageProcessing/includes/cvhighgui.h
+++ b/2.3-1/src/c/imageProcessing/includes/cvhighgui.h
@@ -20,7 +20,7 @@ extern "C" {
#include "types.h"
#include "opencv2/core/core.hpp"
-#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui.hpp"
IplImage* imcvLoadImages(char *filename, uint8 opentype);
uint8 imcvShowImages(char *winname, IplImage* img);
diff --git a/2.3-1/src/c/imageProcessing/includes/cvimgproc.h b/2.3-1/src/c/imageProcessing/includes/cvimgproc.h
new file mode 100644
index 00000000..5d982e49
--- /dev/null
+++ b/2.3-1/src/c/imageProcessing/includes/cvimgproc.h
@@ -0,0 +1,31 @@
+/* Copyright (C) 2016 - IIT Bombay - FOSSEE
+
+ This file must be used under the terms of the CeCILL.
+ This source file is licensed as described in the file COPYING, which
+ you should have received as part of this distribution. The terms
+ are also available at
+ http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+ Author: Siddhesh Wani
+ Organization: FOSSEE, IIT Bombay
+ Email: toolbox@scilab.in
+*/
+
+#ifndef __CVIMGPROC_H__
+#define __CVIMGPROC_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include "types.h"
+#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+
+uint8 imcvCvtColors(IplImage* src, IplImage* dst, char* code);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /*__CVIMGPROC_H__*/
diff --git a/2.3-1/src/c/imageProcessing/interfaces/int_cvcore.h b/2.3-1/src/c/imageProcessing/interfaces/int_cvcore.h
index 9d83caac..5ae0055f 100644
--- a/2.3-1/src/c/imageProcessing/interfaces/int_cvcore.h
+++ b/2.3-1/src/c/imageProcessing/interfaces/int_cvcore.h
@@ -19,11 +19,12 @@ extern "C" {
#include "types.h"
-
+#include "cvcore.h"
#define d0d0g2d0CV_CreateImageim0(width,height,depth,depth_size,no_of_ch) \
imcvCreateImages(width,height,depth,no_of_ch)
+#define im0CV_GetImgSized2(img,imgsize) imcvGetImgSizes(img,imgsize)
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/2.3-1/src/c/imageProcessing/interfaces/int_cvhighgui.h b/2.3-1/src/c/imageProcessing/interfaces/int_cvhighgui.h
index f08ac40b..d8ecf1c6 100644
--- a/2.3-1/src/c/imageProcessing/interfaces/int_cvhighgui.h
+++ b/2.3-1/src/c/imageProcessing/interfaces/int_cvhighgui.h
@@ -19,7 +19,7 @@ extern "C" {
#include "types.h"
-#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui.hpp"
#define g2d0CV_LoadImageim0(filename,name_size,loadtype) imcvLoadImages(filename,loadtype)
#define g2im0CV_ShowImageu80(winname,win_size,img) imcvShowImages(winname,img)
diff --git a/2.3-1/src/c/imageProcessing/interfaces/int_cvimgproc.h b/2.3-1/src/c/imageProcessing/interfaces/int_cvimgproc.h
new file mode 100644
index 00000000..47192e10
--- /dev/null
+++ b/2.3-1/src/c/imageProcessing/interfaces/int_cvimgproc.h
@@ -0,0 +1,32 @@
+/* Copyright (C) 2016 - IIT Bombay - FOSSEE
+
+ This file must be used under the terms of the CeCILL.
+ This source file is licensed as described in the file COPYING, which
+ you should have received as part of this distribution. The terms
+ are also available at
+ http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+ Author: Siddhesh Wani
+ Organization: FOSSEE, IIT Bombay
+ Email: toolbox@scilab.in
+*/
+
+#ifndef __INT_CVIMGPROC_H__
+#define __INT_CVIMGPROC_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include "types.h"
+#include "opencv2/imgproc/imgproc.hpp"
+
+#define im0im0g2CV_CvtColoru80(src,dst,code,code_size) imcvCvtColors(src,dst,code)
+#define im0d0d0g2CV_Thresholdim0(src,threshold,maxvalue,thresh_type,type_size) \
+    imcvThresholds(src,threshold,maxvalue,thresh_type)
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /*__INT_CVIMGPROC_H__*/