path: root/thirdparty/linux/include/opencv2/ximgproc
author     shamikam    2017-01-16 02:56:17 +0530
committer  shamikam    2017-01-16 02:56:17 +0530
commit     a6df67e8bcd5159cde27556f4f6a315f8dc2215f (patch)
tree       e806e966b06a53388fb300d89534354b222c2cad /thirdparty/linux/include/opencv2/ximgproc
First Commit (HEAD, master)
Diffstat (limited to 'thirdparty/linux/include/opencv2/ximgproc')
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/deriche_filter.hpp             |  77
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/disparity_filter.hpp           | 210
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/edge_filter.hpp                | 454
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/estimated_covariance.hpp       |  82
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/fast_hough_transform.hpp       | 164
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/fast_line_detector.hpp         |  81
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/lsc.hpp                        | 157
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/paillou_filter.hpp             |  67
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/seeds.hpp                      | 183
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/segmentation.hpp               | 252
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/slic.hpp                       | 168
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/sparse_match_interpolator.hpp  | 132
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/structured_edge_detection.hpp  | 128
-rw-r--r--  thirdparty/linux/include/opencv2/ximgproc/weighted_median_filter.hpp     |  95
14 files changed, 2250 insertions, 0 deletions
diff --git a/thirdparty/linux/include/opencv2/ximgproc/deriche_filter.hpp b/thirdparty/linux/include/opencv2/ximgproc/deriche_filter.hpp
new file mode 100644
index 0000000..2371feb
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/deriche_filter.hpp
@@ -0,0 +1,77 @@
+/*
+ * By downloading, copying, installing or using the software you agree to this license.
+ * If you do not agree to this license, do not download, install,
+ * copy or use the software.
+ *
+ *
+ * License Agreement
+ * For Open Source Computer Vision Library
+ * (3 - clause BSD License)
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met :
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and / or other materials provided with the distribution.
+ *
+ * * Neither the names of the copyright holders nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * This software is provided by the copyright holders and contributors "as is" and
+ * any express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are disclaimed.
+ * In no event shall copyright holders or contributors be liable for any direct,
+ * indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused
+ * and on any theory of liability, whether in contract, strict liability,
+ * or tort(including negligence or otherwise) arising in any way out of
+ * the use of this software, even if advised of the possibility of such damage.
+ */
+
+#ifndef __OPENCV_DERICHEFILTER_HPP__
+#define __OPENCV_DERICHEFILTER_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv {
+namespace ximgproc {
+
+//! @addtogroup ximgproc_filters
+//! @{
+
+/**
+* @brief Applies the Deriche filter in the Y direction to an image.
+*
+* For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
+*
+* @param _op Source 8-bit or 16-bit, 1-channel or 3-channel image.
+* @param _dst Result CV_32F image with the same number of channels as _op.
+* @param alphaDerive double, see the referenced paper.
+* @param alphaMean double, see the referenced paper.
+*
+*/
+CV_EXPORTS void GradientDericheY(InputArray _op, OutputArray _dst, double alphaDerive,double alphaMean);
+/**
+* @brief Applies the Deriche filter in the X direction to an image.
+*
+* For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
+*
+* @param _op Source 8-bit or 16-bit, 1-channel or 3-channel image.
+* @param _dst Result CV_32F image with the same number of channels as _op.
+* @param alphaDerive double, see the referenced paper.
+* @param alphaMean double, see the referenced paper.
+*
+*/
+CV_EXPORTS void GradientDericheX(InputArray _op, OutputArray _dst, double alphaDerive,double alphaMean);
+
+}
+}
+#endif
+#endif
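A minimal usage sketch for the two Deriche gradient functions declared in this header; the input path and the alpha values are illustrative assumptions, not values prescribed by the header.

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc/deriche_filter.hpp>

int main()
{
    // "input.png" is a placeholder path used only for illustration.
    cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return 1;

    cv::Mat gx, gy, mag;
    // alphaDerive = 1.0 and alphaMean = 1.0 are assumed example values; tune them per the paper.
    cv::ximgproc::GradientDericheX(img, gx, 1.0, 1.0);
    cv::ximgproc::GradientDericheY(img, gy, 1.0, 1.0);

    // Both outputs are single-channel CV_32F here, so the gradient magnitude follows directly.
    cv::magnitude(gx, gy, mag);
    return 0;
}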
diff --git a/thirdparty/linux/include/opencv2/ximgproc/disparity_filter.hpp b/thirdparty/linux/include/opencv2/ximgproc/disparity_filter.hpp
new file mode 100644
index 0000000..b738436
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/disparity_filter.hpp
@@ -0,0 +1,210 @@
+/*
+ * By downloading, copying, installing or using the software you agree to this license.
+ * If you do not agree to this license, do not download, install,
+ * copy or use the software.
+ *
+ *
+ * License Agreement
+ * For Open Source Computer Vision Library
+ * (3 - clause BSD License)
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met :
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and / or other materials provided with the distribution.
+ *
+ * * Neither the names of the copyright holders nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * This software is provided by the copyright holders and contributors "as is" and
+ * any express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are disclaimed.
+ * In no event shall copyright holders or contributors be liable for any direct,
+ * indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused
+ * and on any theory of liability, whether in contract, strict liability,
+ * or tort(including negligence or otherwise) arising in any way out of
+ * the use of this software, even if advised of the possibility of such damage.
+ */
+
+#ifndef __OPENCV_DISPARITYFILTER_HPP__
+#define __OPENCV_DISPARITYFILTER_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+#include <opencv2/calib3d.hpp>
+
+namespace cv {
+namespace ximgproc {
+
+//! @addtogroup ximgproc_filters
+//! @{
+
+/** @brief Main interface for all disparity map filters.
+ */
+class CV_EXPORTS_W DisparityFilter : public Algorithm
+{
+public:
+
+ /** @brief Apply filtering to the disparity map.
+
+ @param disparity_map_left disparity map of the left view, 1 channel, CV_16S type. Implicitly assumes that disparity
+ values are scaled by 16 (one-pixel disparity corresponds to the value of 16 in the disparity map). Disparity map
+ can have any resolution, it will be automatically resized to fit left_view resolution.
+
+ @param left_view left view of the original stereo-pair to guide the filtering process, 8-bit single-channel
+ or three-channel image.
+
+ @param filtered_disparity_map output disparity map.
+
+ @param disparity_map_right optional argument, some implementations might also use the disparity map
+ of the right view to compute confidence maps, for instance.
+
+ @param ROI region of the disparity map to filter. Optional, usually it should be set automatically.
+
+ @param right_view optional argument, some implementations might also use the right view of the original
+ stereo-pair.
+ */
+ CV_WRAP virtual void filter(InputArray disparity_map_left, InputArray left_view, OutputArray filtered_disparity_map, InputArray disparity_map_right = Mat(), Rect ROI = Rect(), InputArray right_view = Mat()) = 0;
+};
+
+/** @brief Disparity map filter based on the Weighted Least Squares filter (in the form of the Fast Global Smoother,
+which is much faster than traditional Weighted Least Squares filter implementations) and optional use of
+left-right-consistency-based confidence to refine the results in half-occlusions and uniform areas.
+ */
+class CV_EXPORTS_W DisparityWLSFilter : public DisparityFilter
+{
+public:
+ /** filter parameters */
+
+ /** @brief Lambda is a parameter defining the amount of regularization during filtering. Larger values force
+ filtered disparity map edges to adhere more to source image edges. Typical value is 8000.
+ */
+ CV_WRAP virtual double getLambda() = 0;
+ /** @see getLambda */
+ CV_WRAP virtual void setLambda(double _lambda) = 0;
+ /** @brief SigmaColor is a parameter defining how sensitive the filtering process is to source image edges.
+ Large values can lead to disparity leakage through low-contrast edges. Small values can make the filter too
+ sensitive to noise and textures in the source image. Typical values range from 0.8 to 2.0.
+ */
+ CV_WRAP virtual double getSigmaColor() = 0;
+ /** @see getSigmaColor */
+ CV_WRAP virtual void setSigmaColor(double _sigma_color) = 0;
+
+ /** confidence-related parameters */
+
+ /** @brief LRCthresh is a threshold of disparity difference used in left-right-consistency check during
+ confidence map computation. The default value of 24 (1.5 pixels) is virtually always good enough.
+ */
+ CV_WRAP virtual int getLRCthresh() = 0;
+ /** @see getLRCthresh */
+ CV_WRAP virtual void setLRCthresh(int _LRC_thresh) = 0;
+ /** @brief DepthDiscontinuityRadius is a parameter used in confidence computation. It defines the size of
+ low-confidence regions around depth discontinuities.
+ */
+ CV_WRAP virtual int getDepthDiscontinuityRadius() = 0;
+ /** @see getDepthDiscontinuityRadius */
+ CV_WRAP virtual void setDepthDiscontinuityRadius(int _disc_radius) = 0;
+ /** @brief Get the confidence map that was used in the last filter call. It is a CV_32F one-channel image
+ with values ranging from 0.0 (totally untrusted regions of the raw disparity map) to 255.0 (regions containing
+ correct disparity values with a high degree of confidence).
+ */
+ CV_WRAP virtual Mat getConfidenceMap() = 0;
+ /** @brief Get the ROI used in the last filter call
+ */
+ CV_WRAP virtual Rect getROI() = 0;
+};
+
+/** @brief Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant
+filter parameters automatically based on the matcher instance. Currently supports only StereoBM and StereoSGBM.
+
+@param matcher_left stereo matcher instance that will be used with the filter
+*/
+CV_EXPORTS_W
+Ptr<DisparityWLSFilter> createDisparityWLSFilter(Ptr<StereoMatcher> matcher_left);
+
+/** @brief Convenience method to set up the matcher for computing the right-view disparity map
+that is required in case of filtering with confidence.
+
+@param matcher_left main stereo matcher instance that will be used with the filter
+*/
+CV_EXPORTS_W
+Ptr<StereoMatcher> createRightMatcher(Ptr<StereoMatcher> matcher_left);
+
+/** @brief More generic factory method that creates an instance of DisparityWLSFilter and executes basic
+initialization routines. When using this method you will need to set up the ROI, matchers and
+other parameters yourself.
+
+@param use_confidence filtering with confidence requires two disparity maps (for the left and right views) and is
+approximately two times slower. However, quality is typically significantly better.
+*/
+CV_EXPORTS_W
+Ptr<DisparityWLSFilter> createDisparityWLSFilterGeneric(bool use_confidence);
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+/** @brief Function for reading ground truth disparity maps. Supports basic Middlebury
+and MPI-Sintel formats. Note that the resulting disparity map is scaled by 16.
+
+@param src_path path to the image, containing ground-truth disparity map
+
+@param dst output disparity map, CV_16S depth
+
+@result returns zero if the ground truth was read successfully
+ */
+CV_EXPORTS
+int readGT(String src_path,OutputArray dst);
+
+/** @brief Function for computing mean square error for disparity maps
+
+@param GT ground truth disparity map
+
+@param src disparity map to evaluate
+
+@param ROI region of interest
+
+@result returns mean square error between GT and src
+ */
+CV_EXPORTS
+double computeMSE(InputArray GT, InputArray src, Rect ROI);
+
+/** @brief Function for computing the percent of "bad" pixels in the disparity map
+(pixels where error is higher than a specified threshold)
+
+@param GT ground truth disparity map
+
+@param src disparity map to evaluate
+
+@param ROI region of interest
+
+@param thresh threshold used to determine "bad" pixels
+
+@result returns the percent of "bad" pixels in src as compared to GT
+ */
+CV_EXPORTS
+double computeBadPixelPercent(InputArray GT, InputArray src, Rect ROI, int thresh=24/*1.5 pixels*/);
+
+/** @brief Function for creating a disparity map visualization (clamped CV_8U image)
+
+@param src input disparity map (CV_16S depth)
+
+@param dst output visualization
+
+@param scale disparity map will be multiplied by this value for visualization
+ */
+CV_EXPORTS
+void getDisparityVis(InputArray src,OutputArray dst,double scale=1.0);
+
+//! @}
+}
+}
+#endif
+#endif
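A sketch of the typical filtering-with-confidence pipeline built from the interfaces above; the StereoBM settings, file names and lambda/sigma values are assumptions for illustration only.

#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc/disparity_filter.hpp>

using namespace cv;

int main()
{
    // Placeholder file names; any rectified stereo pair works.
    Mat left = imread("left.png", IMREAD_GRAYSCALE);
    Mat right = imread("right.png", IMREAD_GRAYSCALE);
    if (left.empty() || right.empty()) return 1;

    // Assumed matcher settings: 128 disparities, 15x15 block.
    Ptr<StereoBM> left_matcher = StereoBM::create(128, 15);
    Ptr<StereoMatcher> right_matcher = ximgproc::createRightMatcher(left_matcher);
    Ptr<ximgproc::DisparityWLSFilter> wls = ximgproc::createDisparityWLSFilter(left_matcher);

    Mat left_disp, right_disp, filtered, vis;
    left_matcher->compute(left, right, left_disp);
    right_matcher->compute(right, left, right_disp);

    wls->setLambda(8000.0);   // typical value per the documentation above
    wls->setSigmaColor(1.5);  // within the documented 0.8-2.0 range
    wls->filter(left_disp, left, filtered, right_disp);

    // Clamp and scale the filtered map for viewing.
    ximgproc::getDisparityVis(filtered, vis, 1.0);
    imwrite("filtered_disparity.png", vis);
    return 0;
}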
diff --git a/thirdparty/linux/include/opencv2/ximgproc/edge_filter.hpp b/thirdparty/linux/include/opencv2/ximgproc/edge_filter.hpp
new file mode 100644
index 0000000..65cff9e
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/edge_filter.hpp
@@ -0,0 +1,454 @@
+/*
+ * By downloading, copying, installing or using the software you agree to this license.
+ * If you do not agree to this license, do not download, install,
+ * copy or use the software.
+ *
+ *
+ * License Agreement
+ * For Open Source Computer Vision Library
+ * (3 - clause BSD License)
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met :
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and / or other materials provided with the distribution.
+ *
+ * * Neither the names of the copyright holders nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * This software is provided by the copyright holders and contributors "as is" and
+ * any express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are disclaimed.
+ * In no event shall copyright holders or contributors be liable for any direct,
+ * indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused
+ * and on any theory of liability, whether in contract, strict liability,
+ * or tort(including negligence or otherwise) arising in any way out of
+ * the use of this software, even if advised of the possibility of such damage.
+ */
+
+#ifndef __OPENCV_EDGEFILTER_HPP__
+#define __OPENCV_EDGEFILTER_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+//! @addtogroup ximgproc_filters
+//! @{
+
+enum EdgeAwareFiltersList
+{
+ DTF_NC,
+ DTF_IC,
+ DTF_RF,
+
+ GUIDED_FILTER,
+ AM_FILTER
+};
+
+
+/** @brief Interface for realizations of Domain Transform filter.
+
+For more details about this filter see @cite Gastal11 .
+ */
+class CV_EXPORTS_W DTFilter : public Algorithm
+{
+public:
+
+ /** @brief Apply the domain transform filtering operation to the source image.
+
+ @param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
+
+ @param dst destination image.
+
+ @param dDepth optional depth of the output image. dDepth can be set to -1, which will be equivalent
+ to src.depth().
+ */
+ CV_WRAP virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;
+};
+
+/** @brief Factory method that creates an instance of DTFilter and performs the initialization routines.
+
+@param guide guide image (used to build the transformed distance, which describes the edge structure of the
+guide image).
+
+@param sigmaSpatial \f${\sigma}_H\f$ parameter in the original article, similar to the sigma in the
+coordinate space of bilateralFilter.
+
+@param sigmaColor \f${\sigma}_r\f$ parameter in the original article, similar to the sigma in the
+color space of bilateralFilter.
+
+@param mode one of three modes, DTF_NC, DTF_RF or DTF_IC, which correspond to the three modes for
+filtering 2D signals described in the article.
+
+@param numIters optional number of iterations used for filtering, 3 is quite enough.
+
+For more details about Domain Transform filter parameters, see the original article @cite Gastal11 and
+[Domain Transform filter homepage](http://www.inf.ufrgs.br/~eslgastal/DomainTransform/).
+ */
+CV_EXPORTS_W
+Ptr<DTFilter> createDTFilter(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
+
+/** @brief Simple one-line Domain Transform filter call. If you have multiple images to filter with the same
+guide image, use the DTFilter interface to avoid extra computations at the initialization stage.
+
+@param guide guide image (also called the joint image) with unsigned 8-bit or floating-point 32-bit
+depth and up to 4 channels.
+@param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
+@param dst destination image.
+@param sigmaSpatial \f${\sigma}_H\f$ parameter in the original article, similar to the sigma in the
+coordinate space of bilateralFilter.
+@param sigmaColor \f${\sigma}_r\f$ parameter in the original article, similar to the sigma in the
+color space of bilateralFilter.
+@param mode one of three modes, DTF_NC, DTF_RF or DTF_IC, which correspond to the three modes for
+filtering 2D signals described in the article.
+@param numIters optional number of iterations used for filtering, 3 is quite enough.
+@sa bilateralFilter, guidedFilter, amFilter
+ */
+CV_EXPORTS_W
+void dtFilter(InputArray guide, InputArray src, OutputArray dst, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+/** @brief Interface for realizations of Guided Filter.
+
+For more details about this filter see @cite Kaiming10 .
+ */
+class CV_EXPORTS_W GuidedFilter : public Algorithm
+{
+public:
+
+ /** @brief Apply Guided Filter to the filtering image.
+
+ @param src filtering image with any number of channels.
+
+ @param dst output image.
+
+ @param dDepth optional depth of the output image. dDepth can be set to -1, which will be equivalent
+ to src.depth().
+ */
+ CV_WRAP virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;
+};
+
+/** @brief Factory method that creates an instance of GuidedFilter and performs the initialization routines.
+
+@param guide guide image (or array of images) with up to 3 channels; if it has more than 3
+channels, only the first 3 channels are used.
+
+@param radius radius of Guided Filter.
+
+@param eps regularization term of Guided Filter. \f${eps}^2\f$ is similar to the sigma in the color
+space of bilateralFilter.
+
+For more details about Guided Filter parameters, see the original article @cite Kaiming10 .
+ */
+CV_EXPORTS_W Ptr<GuidedFilter> createGuidedFilter(InputArray guide, int radius, double eps);
+
+/** @brief Simple one-line Guided Filter call.
+
+If you have multiple images to filter with the same guide image, use the GuidedFilter interface to
+avoid extra computations at the initialization stage.
+
+@param guide guide image (or array of images) with up to 3 channels; if it has more than 3
+channels, only the first 3 channels are used.
+
+@param src filtering image with any number of channels.
+
+@param dst output image.
+
+@param radius radius of Guided Filter.
+
+@param eps regularization term of Guided Filter. \f${eps}^2\f$ is similar to the sigma in the color
+space of bilateralFilter.
+
+@param dDepth optional depth of the output image.
+
+@sa bilateralFilter, dtFilter, amFilter */
+CV_EXPORTS_W void guidedFilter(InputArray guide, InputArray src, OutputArray dst, int radius, double eps, int dDepth = -1);
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+/** @brief Interface for Adaptive Manifold Filter realizations.
+
+For more details about this filter see @cite Gastal12 and References_.
+
+Listed below are optional parameters that may be set with the Algorithm::set function.
+- member double sigma_s = 16.0
+Spatial standard deviation.
+- member double sigma_r = 0.2
+Color space standard deviation.
+- member int tree_height = -1
+Height of the manifold tree (default = -1 : automatically computed).
+- member int num_pca_iterations = 1
+Number of iterations used to compute the eigenvector.
+- member bool adjust_outliers = false
+Specifies whether to adjust outliers using Eq. 9.
+- member bool use_RNG = true
+Specifies whether to use a random number generator to compute the eigenvector.
+ */
+class CV_EXPORTS_W AdaptiveManifoldFilter : public Algorithm
+{
+public:
+ /** @brief Apply high-dimensional filtering using adaptive manifolds.
+
+ @param src filtering image with any number of channels.
+
+ @param dst output image.
+
+ @param joint optional joint (also called guide) image with any number of channels.
+ */
+ CV_WRAP virtual void filter(InputArray src, OutputArray dst, InputArray joint = noArray()) = 0;
+
+ CV_WRAP virtual void collectGarbage() = 0;
+
+ CV_WRAP static Ptr<AdaptiveManifoldFilter> create();
+
+ /** @see setSigmaS */
+ virtual double getSigmaS() const = 0;
+ /** @copybrief getSigmaS @see getSigmaS */
+ virtual void setSigmaS(double val) = 0;
+ /** @see setSigmaR */
+ virtual double getSigmaR() const = 0;
+ /** @copybrief getSigmaR @see getSigmaR */
+ virtual void setSigmaR(double val) = 0;
+ /** @see setTreeHeight */
+ virtual int getTreeHeight() const = 0;
+ /** @copybrief getTreeHeight @see getTreeHeight */
+ virtual void setTreeHeight(int val) = 0;
+ /** @see setPCAIterations */
+ virtual int getPCAIterations() const = 0;
+ /** @copybrief getPCAIterations @see getPCAIterations */
+ virtual void setPCAIterations(int val) = 0;
+ /** @see setAdjustOutliers */
+ virtual bool getAdjustOutliers() const = 0;
+ /** @copybrief getAdjustOutliers @see getAdjustOutliers */
+ virtual void setAdjustOutliers(bool val) = 0;
+ /** @see setUseRNG */
+ virtual bool getUseRNG() const = 0;
+ /** @copybrief getUseRNG @see getUseRNG */
+ virtual void setUseRNG(bool val) = 0;
+};
+
+/** @brief Factory method that creates an instance of AdaptiveManifoldFilter and performs some initialization routines.
+
+@param sigma_s spatial standard deviation.
+
+@param sigma_r color space standard deviation, similar to the sigma in the color space of
+bilateralFilter.
+
+@param adjust_outliers optional, specifies whether to perform the outlier adjustment operation
+(Eq. 9 in the original paper).
+
+For more details about Adaptive Manifold Filter parameters, see the original article @cite Gastal12 .
+
+@note Joint images with CV_8U and CV_16U depth are converted to CV_32F depth and the [0; 1]
+color range before processing. Hence the color space sigma sigma_r must be in the [0; 1] range, unlike the
+corresponding sigmas in the bilateralFilter and dtFilter functions.
+*/
+CV_EXPORTS_W Ptr<AdaptiveManifoldFilter> createAMFilter(double sigma_s, double sigma_r, bool adjust_outliers = false);
+
+/** @brief Simple one-line Adaptive Manifold Filter call.
+
+@param joint joint (also called guide) image or array of images with any number of channels.
+
+@param src filtering image with any number of channels.
+
+@param dst output image.
+
+@param sigma_s spatial standard deviation.
+
+@param sigma_r color space standard deviation, similar to the sigma in the color space of
+bilateralFilter.
+
+@param adjust_outliers optional, specifies whether to perform the outlier adjustment operation
+(Eq. 9 in the original paper).
+
+@note Joint images with CV_8U and CV_16U depth are converted to CV_32F depth and the [0; 1]
+color range before processing. Hence the color space sigma sigma_r must be in the [0; 1] range, unlike the
+corresponding sigmas in the bilateralFilter and dtFilter functions. @sa bilateralFilter, dtFilter, guidedFilter
+*/
+CV_EXPORTS_W void amFilter(InputArray joint, InputArray src, OutputArray dst, double sigma_s, double sigma_r, bool adjust_outliers = false);
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+/** @brief Applies the joint bilateral filter to an image.
+
+@param joint Joint 8-bit or floating-point, 1-channel or 3-channel image.
+
+@param src Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint
+image.
+
+@param dst Destination image of the same size and type as src .
+
+@param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
+it is computed from sigmaSpace .
+
+@param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
+farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
+larger areas of semi-equal color.
+
+@param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
+farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
+When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
+proportional to sigmaSpace .
+
+@param borderType border mode used to extrapolate pixels outside of the image.
+
+@note bilateralFilter and jointBilateralFilter use L1 norm to compute difference between colors.
+
+@sa bilateralFilter, amFilter
+*/
+CV_EXPORTS_W
+void jointBilateralFilter(InputArray joint, InputArray src, OutputArray dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT);
+
+/** @brief Applies the bilateral texture filter to an image. It performs structure-preserving texture smoothing.
+For more details about this filter see @cite Cho2014.
+
+@param src Source image whose depth is 8-bit UINT or 32-bit FLOAT
+
+@param dst Destination image of the same size and type as src.
+
+@param fr Radius of the kernel to be used for filtering. It should be a positive integer.
+
+@param numIter Number of iterations of the algorithm. It should be a positive integer.
+
+@param sigmaAlpha Controls the sharpness of the weight transition from edges to smooth/texture regions, where
+a bigger value means sharper transition. When the value is negative, it is automatically calculated.
+
+@param sigmaAvg Range blur parameter for texture blurring. A larger value makes the result more blurred. When the
+value is negative, it is automatically calculated as described in the paper.
+
+@sa rollingGuidanceFilter, bilateralFilter
+*/
+CV_EXPORTS_W
+void bilateralTextureFilter(InputArray src, OutputArray dst, int fr = 3, int numIter = 1, double sigmaAlpha = -1., double sigmaAvg = -1.);
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+/** @brief Applies the rolling guidance filter to an image.
+
+For more details, please see @cite zhang2014rolling
+
+@param src Source 8-bit or floating-point, 1-channel or 3-channel image.
+
+@param dst Destination image of the same size and type as src.
+
+@param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
+it is computed from sigmaSpace .
+
+@param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
+farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
+larger areas of semi-equal color.
+
+@param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
+farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
+When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
+proportional to sigmaSpace .
+
+@param numOfIter Number of iterations of joint edge-preserving filtering applied on the source image.
+
+@param borderType border mode used to extrapolate pixels outside of the image.
+
+@note rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
+
+@sa jointBilateralFilter, bilateralFilter, amFilter
+*/
+CV_EXPORTS_W
+void rollingGuidanceFilter(InputArray src, OutputArray dst, int d = -1, double sigmaColor = 25, double sigmaSpace = 3, int numOfIter = 4, int borderType = BORDER_DEFAULT);
+
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+
+
+/** @brief Interface for implementations of Fast Global Smoother filter.
+
+For more details about this filter see @cite Min2014 and @cite Farbman2008 .
+*/
+class CV_EXPORTS_W FastGlobalSmootherFilter : public Algorithm
+{
+public:
+ /** @brief Apply smoothing operation to the source image.
+
+ @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
+
+ @param dst destination image.
+ */
+ CV_WRAP virtual void filter(InputArray src, OutputArray dst) = 0;
+};
+
+/** @brief Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
+
+@param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
+
+@param lambda parameter defining the amount of regularization
+
+@param sigma_color parameter similar to the color space sigma in bilateralFilter.
+
+@param lambda_attenuation internal parameter, defining how much lambda decreases after each iteration. Normally,
+it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
+
+@param num_iter number of iterations used for filtering, 3 is usually enough.
+
+For more details about Fast Global Smoother parameters, see the original paper @cite Min2014. However, please note that
+there are several differences. Lambda attenuation described in the paper is implemented a bit differently so do not
+expect the results to be identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to
+achieve the same effect. Also, in case of image filtering where source and guide image are the same, authors
+propose to dynamically update the guide image after each iteration. To maximize the performance this feature
+was not implemented here.
+*/
+CV_EXPORTS_W Ptr<FastGlobalSmootherFilter> createFastGlobalSmootherFilter(InputArray guide, double lambda, double sigma_color, double lambda_attenuation=0.25, int num_iter=3);
+
+/** @brief Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same
+guide then use FastGlobalSmootherFilter interface to avoid extra computations.
+
+@param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
+
+@param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
+
+@param dst destination image.
+
+@param lambda parameter defining the amount of regularization
+
+@param sigma_color parameter similar to the color space sigma in bilateralFilter.
+
+@param lambda_attenuation internal parameter, defining how much lambda decreases after each iteration. Normally,
+it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
+
+@param num_iter number of iterations used for filtering, 3 is usually enough.
+*/
+CV_EXPORTS_W void fastGlobalSmootherFilter(InputArray guide, InputArray src, OutputArray dst, double lambda, double sigma_color, double lambda_attenuation=0.25, int num_iter=3);
+
+/** @brief Global image smoothing via L0 gradient minimization.
+
+@param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
+
+@param dst destination image.
+
+@param lambda parameter defining the smooth term weight.
+
+@param kappa parameter defining the increasing factor of the weight of the gradient data term.
+
+For more details about L0 Smoother, see the original paper @cite xu2011image.
+*/
+CV_EXPORTS_W void l0Smooth(InputArray src, OutputArray dst, double lambda = 0.02, double kappa = 2.0);
+//! @}
+}
+}
+#endif
+#endif
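A short sketch exercising a few of the filters declared above (dtFilter, guidedFilter and rollingGuidanceFilter); the file name and the parameter values are assumed for illustration only.

#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc/edge_filter.hpp>

using namespace cv;

int main()
{
    Mat src = imread("photo.png");  // placeholder input, 8-bit BGR
    if (src.empty()) return 1;

    Mat dt_out, gf_out, rgf_out;

    // Domain transform filter guided by the image itself; sigma values are example choices.
    ximgproc::dtFilter(src, src, dt_out, 10.0, 30.0, ximgproc::DTF_NC);

    // Guided filter, again self-guided; radius and eps are example choices.
    ximgproc::guidedFilter(src, src, gf_out, 8, 100.0);

    // Rolling guidance filter with its documented defaults.
    ximgproc::rollingGuidanceFilter(src, rgf_out);
    return 0;
}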
diff --git a/thirdparty/linux/include/opencv2/ximgproc/estimated_covariance.hpp b/thirdparty/linux/include/opencv2/ximgproc/estimated_covariance.hpp
new file mode 100644
index 0000000..e4f5ddf
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/estimated_covariance.hpp
@@ -0,0 +1,82 @@
+/*
+By downloading, copying, installing or using the software you agree to this license.
+If you do not agree to this license, do not download, install,
+copy or use the software.
+
+
+ License Agreement
+ For Open Source Computer Vision Library
+ (3-clause BSD License)
+
+Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
+Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.
+Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
+Copyright (C) 2015, OpenCV Foundation, all rights reserved.
+Copyright (C) 2015, Itseez Inc., all rights reserved.
+Third party copyrights are property of their respective owners.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the names of the copyright holders nor the names of the contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+This software is provided by the copyright holders and contributors "as is" and
+any express or implied warranties, including, but not limited to, the implied
+warranties of merchantability and fitness for a particular purpose are disclaimed.
+In no event shall copyright holders or contributors be liable for any direct,
+indirect, incidental, special, exemplary, or consequential damages
+(including, but not limited to, procurement of substitute goods or services;
+loss of use, data, or profits; or business interruption) however caused
+and on any theory of liability, whether in contract, strict liability,
+or tort (including negligence or otherwise) arising in any way out of
+the use of this software, even if advised of the possibility of such damage.
+
+Algorithmic details of this algorithm can be found at:
+ * O. Green, Y. Birk, "A Computationally Efficient Algorithm for the 2D Covariance Method", ACM/IEEE International Conference on High Performance Computing, Networking, Storage and Analysis, Denver, Colorado, 2013
+A previous and less efficient version of the algorithm can be found:
+ * O. Green, L. David, A. Galperin, Y. Birk, "Efficient parallel computation of the estimated covariance matrix", arXiv, 2013
+
+
+*/
+#ifndef __OPENCV_ESTIMATECOVARIANCE_HPP__
+#define __OPENCV_ESTIMATECOVARIANCE_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+/** @brief Computes the estimated covariance matrix of an image using the sliding
+window formulation.
+
+@param src The source image. Input image must be of a complex type.
+@param dst The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).
+@param windowRows The number of rows in the window.
+@param windowCols The number of cols in the window.
+The window size parameters control the accuracy of the estimation.
+The sliding window moves over the entire image from the top-left corner
+to the bottom right corner. Each location of the window represents a sample.
+If the window is the size of the image, then this gives the exact covariance matrix.
+For all other cases, the sizes of the window will impact the number of samples
+and the number of elements in the estimated covariance matrix.
+*/
+
+CV_EXPORTS_W void covarianceEstimation(InputArray src, OutputArray dst, int windowRows, int windowCols);
+
+}
+}
+#endif
+#endif
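A sketch of calling covarianceEstimation; since the documentation above requires a complex-typed input, the image is packed into a two-channel CV_32FC2 matrix with a zero imaginary part, and the 7x7 window is an assumed example size.

#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc/estimated_covariance.hpp>

int main()
{
    cv::Mat gray = cv::imread("texture.png", cv::IMREAD_GRAYSCALE);  // placeholder input
    if (gray.empty()) return 1;

    cv::Mat realPart, complexImg, covar;
    gray.convertTo(realPart, CV_32F);

    // Build a complex-valued image: real part from the image, imaginary part zero.
    cv::Mat planes[] = { realPart, cv::Mat::zeros(realPart.size(), CV_32F) };
    cv::merge(planes, 2, complexImg);

    // A 7x7 window is an illustrative choice; the result is a 49x49 covariance matrix.
    cv::ximgproc::covarianceEstimation(complexImg, covar, 7, 7);
    return 0;
}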
diff --git a/thirdparty/linux/include/opencv2/ximgproc/fast_hough_transform.hpp b/thirdparty/linux/include/opencv2/ximgproc/fast_hough_transform.hpp
new file mode 100644
index 0000000..cdfb032
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/fast_hough_transform.hpp
@@ -0,0 +1,164 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2015, Smart Engines Ltd, all rights reserved.
+// Copyright (C) 2015, Institute for Information Transmission Problems of the Russian Academy of Sciences (Kharkevich Institute), all rights reserved.
+// Copyright (C) 2015, Dmitry Nikolaev, Simon Karpenko, Michail Aliev, Elena Kuznetsova, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_FAST_HOUGH_TRANSFORM_HPP__
+#define __OPENCV_FAST_HOUGH_TRANSFORM_HPP__
+#ifdef __cplusplus
+
+#include "opencv2/core.hpp"
+
+
+namespace cv { namespace ximgproc {
+
+/**
+* @brief Specifies the part of Hough space to calculate
+* @details The enum specifies the part of Hough space to calculate. Each
+* member specifies the primary direction of lines (horizontal or vertical)
+* and the direction of angle changes.
+* The direction of angle changes runs from multiples of 90 degrees to odd multiples of 45 degrees.
+* The image is considered to be written top-down and left-to-right.
+* Angles start from the vertical line and go clockwise.
+* Separate quarters and halves are written in the orientation they would have in the
+* full Hough space.
+*/
+enum AngleRangeOption
+{
+ ARO_0_45 = 0, //< Primarily vertical direction with clockwise angle changes
+ ARO_45_90 = 1, //< Primarily horizontal direction with counterclockwise angle changes
+ ARO_90_135 = 2, //< Primarily horizontal direction with clockwise angle changes
+ ARO_315_0 = 3, //< Primarily vertical direction with counterclockwise angle changes
+ ARO_315_45 = 4, //< Primarily vertical direction
+ ARO_45_135 = 5, //< Primarily horizontal direction
+ ARO_315_135 = 6, //< Full set of directions
+ ARO_CTR_HOR = 7, //< 90 +/- atan(0.5), an interval approximately from 63.5 to 116.5 degrees.
+ //< It is used for calculating the Fast Hough Transform for images skewed by atan(0.5).
+ ARO_CTR_VER = 8 //< +/- atan(0.5), an interval approximately from 333.5 (-26.5) to 26.5 degrees.
+ //< It is used for calculating the Fast Hough Transform for images skewed by atan(0.5).
+};
+
+/**
+ * @brief Specifies binary operations.
+ * @details The enum specifies binary operations, that is, operations which involve
+ * two operands. Formally, a binary operation @f$ f @f$ on a set @f$ S @f$
+ * is a binary relation that maps elements of the Cartesian product
+ * @f$ S \times S @f$ to @f$ S @f$:
+* @f[ f: S \times S \to S @f]
+ * @ingroup MinUtils_MathOper
+ */
+enum HoughOp
+{
+ FHT_MIN = 0, //< Binary minimum operation. The constant specifies the binary minimum operation
+ //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = \min(x, y) @f]
+ FHT_MAX = 1, //< Binary maximum operation. The constant specifies the binary maximum operation
+ //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = \max(x, y) @f]
+ FHT_ADD = 2, //< Binary addition operation. The constant specifies the binary addition operation
+ //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = x + y @f]
+ FHT_AVE = 3 //< Binary average operation. The constant specifies the binary average operation
+ //< @f$ f @f$ that is defined as follows: @f[ f(x, y) = \frac{x + y}{2} @f]
+};
+
+/**
+* @brief Specifies whether to skew the Hough transform image
+* @details The enum specifies whether to skew the Hough transform image
+* so that there is no cycling through the image borders in the Hough transform image.
+*/
+enum HoughDeskewOption
+{
+ HDO_RAW = 0, //< Use raw cyclic image
+ HDO_DESKEW = 1 //< Prepare deskewed image
+};
+
+/**
+ * @brief Specifies the degree of rules validation.
+ * @details The enum specifies the degree of rules validation. This can be used,
+ * for example, to choose a proper way of input arguments validation.
+ */
+typedef enum {
+ RO_STRICT = 0x00, ///< Validate each rule in a proper way.
+ RO_IGNORE_BORDERS = 0x01, ///< Skip validations of image borders.
+} RulesOption;
+
+/**
+* @brief Calculates 2D Fast Hough transform of an image.
+* @param dst The destination image, the result of the transformation.
+* @param src The source (input) image.
+* @param dstMatDepth The depth of the destination image.
+* @param op The operation to be applied, see cv::HoughOp
+* @param angleRange The part of Hough space to calculate, see cv::AngleRangeOption
+* @param makeSkew Specifies whether to skew the image, see cv::HoughDeskewOption
+*
+* The function calculates the fast Hough transform for full, half or quarter
+* range of angles.
+*/
+CV_EXPORTS void FastHoughTransform( InputArray src,
+ OutputArray dst,
+ int dstMatDepth,
+ int angleRange = ARO_315_135,
+ int op = FHT_ADD,
+ int makeSkew = HDO_DESKEW );
+
+/**
+* @brief Calculates the coordinates of the line segment corresponding to a point in Hough space.
+* @param houghPoint Point in Hough space.
+* @param srcImgInfo The source (input) image of the Hough transform.
+* @param angleRange The part of Hough space where the point is situated, see cv::AngleRangeOption
+* @param makeSkew Specifies whether to skew the image, see cv::HoughDeskewOption
+* @param rules Specifies the strictness of the line segment calculation, see cv::RulesOption
+* @retval [Vec4i] Coordinates of the line segment corresponding to the point in Hough space.
+* @remarks If the rules parameter is set to RO_STRICT,
+ the returned line is cut along the border of the source image.
+* @remarks If the rules parameter is set to RO_IGNORE_BORDERS then, for a point belonging to
+ the incorrect part of the Hough image, the returned line will not intersect the source image.
+*
+* The function calculates the coordinates of the line segment corresponding to a point in Hough space.
+*/
+CV_EXPORTS Vec4i HoughPoint2Line(const Point &houghPoint,
+ InputArray srcImgInfo,
+ int angleRange = ARO_315_135,
+ int makeSkew = HDO_DESKEW,
+ int rules = RO_IGNORE_BORDERS );
+
+} }// namespace cv::ximgproc
+
+#endif //__cplusplus
+#endif //__OPENCV_FAST_HOUGH_TRANSFORM_HPP__
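A sketch that runs the Fast Hough transform on an edge map and converts the strongest response back into a line segment with HoughPoint2Line; the input path, Canny thresholds and the use of the brightest accumulator cell are illustrative choices.

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/ximgproc/fast_hough_transform.hpp>

using namespace cv;

int main()
{
    Mat img = imread("document.png", IMREAD_GRAYSCALE);  // placeholder input
    if (img.empty()) return 1;

    Mat edges, fht;
    Canny(img, edges, 50, 150);  // assumed thresholds

    ximgproc::FastHoughTransform(edges, fht, CV_32S,
                                 ximgproc::ARO_315_135,
                                 ximgproc::FHT_ADD,
                                 ximgproc::HDO_DESKEW);

    // Take the strongest accumulator cell and map it back to a segment in the source image.
    Point maxLoc;
    minMaxLoc(fht, 0, 0, 0, &maxLoc);
    Vec4i seg = ximgproc::HoughPoint2Line(maxLoc, edges,
                                          ximgproc::ARO_315_135,
                                          ximgproc::HDO_DESKEW,
                                          ximgproc::RO_IGNORE_BORDERS);
    (void)seg;  // segment endpoints as (x1, y1, x2, y2)
    return 0;
}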
diff --git a/thirdparty/linux/include/opencv2/ximgproc/fast_line_detector.hpp b/thirdparty/linux/include/opencv2/ximgproc/fast_line_detector.hpp
new file mode 100644
index 0000000..1df5558
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/fast_line_detector.hpp
@@ -0,0 +1,81 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef __OPENCV_FAST_LINE_DETECTOR_HPP__
+#define __OPENCV_FAST_LINE_DETECTOR_HPP__
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+//! @addtogroup ximgproc_fast_line_detector
+//! @{
+
+/** @brief Class implementing the FLD (Fast Line Detector) algorithm described
+in @cite Lee14 .
+*/
+
+//! @include samples/fld_lines.cpp
+
+class CV_EXPORTS_W FastLineDetector : public Algorithm
+{
+public:
+ /** @example fld_lines.cpp
+ An example using the FastLineDetector
+ */
+ /** @brief Finds lines in the input image.
+ This is the output of the default parameters of the algorithm on the above
+ shown image.
+
+ ![image](pics/corridor_fld.jpg)
+
+ @param _image A grayscale (CV_8UC1) input image. If only a roi needs to be
+ selected, use: `fld_ptr-\>detect(image(roi), lines, ...);
+ lines += Scalar(roi.x, roi.y, roi.x, roi.y);`
+ @param _lines A vector of Vec4f elements specifying the beginning
+ and ending points of a line, where Vec4f is (x1, y1, x2, y2); point
+ 1 is the start and point 2 is the end. Returned lines are directed so that the
+ brighter side is on their left.
+ */
+ CV_WRAP virtual void detect(InputArray _image, OutputArray _lines) = 0;
+
+ /** @brief Draws the line segments on a given image.
+ @param _image The image on which the lines will be drawn. It should be at least
+ as large as the image in which the lines were found.
+ @param lines A vector of the lines to be drawn.
+ @param draw_arrow If true, arrow heads will be drawn.
+ */
+ CV_WRAP virtual void drawSegments(InputOutputArray _image, InputArray lines,
+ bool draw_arrow = false) = 0;
+
+ virtual ~FastLineDetector() { }
+};
+
+/** @brief Creates a smart pointer to a FastLineDetector object and initializes it
+
+@param _length_threshold 10 - Segments shorter than this will be discarded
+@param _distance_threshold 1.41421356 - A point placed farther than this from a
+ hypothesis line segment will be
+ regarded as an outlier
+@param _canny_th1 50 - First threshold for the
+ hysteresis procedure in Canny()
+@param _canny_th2 50 - Second threshold for the
+ hysteresis procedure in Canny()
+@param _canny_aperture_size 3 - Aperture size for the Sobel
+ operator in Canny()
+@param _do_merge false - If true, incremental merging of segments
+ will be performed
+*/
+CV_EXPORTS_W Ptr<FastLineDetector> createFastLineDetector(
+ int _length_threshold = 10, float _distance_threshold = 1.414213562f,
+ double _canny_th1 = 50.0, double _canny_th2 = 50.0, int _canny_aperture_size = 3,
+ bool _do_merge = false);
+
+//! @} ximgproc_fast_line_detector
+}
+}
+#endif
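A sketch of the FastLineDetector interface declared above, close in spirit to the fld_lines.cpp sample it references; default parameters are used and the input path is a placeholder.

#include <vector>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc/fast_line_detector.hpp>

using namespace cv;

int main()
{
    Mat image = imread("corridor.png", IMREAD_GRAYSCALE);  // placeholder input
    if (image.empty()) return 1;

    Ptr<ximgproc::FastLineDetector> fld = ximgproc::createFastLineDetector();

    std::vector<Vec4f> lines;
    fld->detect(image, lines);

    // Draw the detected segments and display the result.
    fld->drawSegments(image, lines);
    imshow("FLD", image);
    waitKey();
    return 0;
}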
diff --git a/thirdparty/linux/include/opencv2/ximgproc/lsc.hpp b/thirdparty/linux/include/opencv2/ximgproc/lsc.hpp
new file mode 100644
index 0000000..e6f5bae
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/lsc.hpp
@@ -0,0 +1,157 @@
+/*********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2014, 2015
+ * Zhengqin Li <li-zq12 at mails dot tsinghua dot edu dot cn>
+ * Jiansheng Chen <jschenthu at mail dot tsinghua dot edu dot cn>
+ * Tsinghua University
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *********************************************************************/
+
+/*
+
+ "Superpixel Segmentation using Linear Spectral Clustering"
+ Zhengqin Li, Jiansheng Chen, IEEE Conference on Computer Vision and Pattern
+ Recognition (CVPR), Jun. 2015
+
+ OpenCV port by: Cristian Balint <cristian dot balint at gmail dot com>
+ */
+
+#ifndef __OPENCV_LSC_HPP__
+#define __OPENCV_LSC_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+//! @addtogroup ximgproc_superpixel
+//! @{
+
+/** @brief Class implementing the LSC (Linear Spectral Clustering) superpixels
+algorithm described in @cite LiCVPR2015LSC.
+
+LSC (Linear Spectral Clustering) produces compact and uniform superpixels with low
+computational costs. Basically, a normalized cuts formulation of the superpixel
+segmentation is adopted based on a similarity metric that measures the color
+similarity and space proximity between image pixels. LSC is of linear computational
+complexity and high memory efficiency and is able to preserve the global properties of images.
+
+ */
+
+class CV_EXPORTS_W SuperpixelLSC : public Algorithm
+{
+public:
+
+ /** @brief Calculates the actual amount of superpixels on a given segmentation computed
+ and stored in SuperpixelLSC object.
+ */
+ CV_WRAP virtual int getNumberOfSuperpixels() const = 0;
+
+ /** @brief Calculates the superpixel segmentation on a given image with the initialized
+ parameters in the SuperpixelLSC object.
+
+ This function can be called again without the need of initializing the algorithm with
+ createSuperpixelLSC(). This saves the computational cost of allocating memory for all the
+ structures of the algorithm.
+
+ @param num_iterations Number of iterations. A higher number improves the result.
+
+ The function computes the superpixel segmentation of an image with the parameters initialized
+ by the function createSuperpixelLSC(). The algorithm starts from a grid of superpixels and
+ then refines the boundaries by proposing updates of edge boundaries.
+
+ */
+ CV_WRAP virtual void iterate( int num_iterations = 10 ) = 0;
+
+ /** @brief Returns the segmentation labeling of the image.
+
+ Each label represents a superpixel, and each pixel is assigned to one superpixel label.
+
+ @param labels_out Return: A CV_32SC1 integer array containing the labels of the superpixel
+ segmentation. The labels are in the range [0, getNumberOfSuperpixels()].
+
+ The function returns an image with the labels of the superpixel segmentation. The labels are in
+ the range [0, getNumberOfSuperpixels()].
+ */
+ CV_WRAP virtual void getLabels( OutputArray labels_out ) const = 0;
+
+ /** @brief Returns the mask of the superpixel segmentation stored in SuperpixelLSC object.
+
+ @param image Return: CV_8UC1 image mask where -1 indicates that the pixel is a superpixel border,
+ and 0 otherwise.
+
+ @param thick_line If false, the border is only one pixel wide, otherwise all pixels at the border
+ are masked.
+
+ The function return the boundaries of the superpixel segmentation.
+ */
+ CV_WRAP virtual void getLabelContourMask( OutputArray image, bool thick_line = true ) const = 0;
+
+ /** @brief Enforce label connectivity.
+
+ @param min_element_size The minimum element size, in percent of the resulting average superpixel size,
+ that should be absorbed into a bigger superpixel. Valid values are in the 0-100 range; a value of 25
+ means that components smaller than a quarter of the average superpixel size are absorbed.
+
+ The function merges components that are too small, assigning the previously found adjacent label
+ to each such component. Calling this function may change the final number of superpixels.
+ */
+ CV_WRAP virtual void enforceLabelConnectivity( int min_element_size = 20 ) = 0;
+
+
+};
+
+/** @brief Creates a SuperpixelLSC object for the given image.
+
+@param image Image to segment
+@param region_size Chooses an average superpixel size measured in pixels
+@param ratio Chooses the enforcement of superpixel compactness factor of superpixel
+
+The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the
+superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future
+computing iterations over the given image. An example of LSC is illustrated in the following picture.
+For enhanced results on color images it is recommended to preprocess the image with a slight Gaussian blur
+using a small 3 x 3 kernel and to convert it to the CIELAB color space.
+
+![image](pics/superpixels_lsc.png)
+
+ */
+
+ CV_EXPORTS_W Ptr<SuperpixelLSC> createSuperpixelLSC( InputArray image, int region_size = 10, float ratio = 0.075f );
+
+//! @}
+
+}
+}
+#endif
+#endif
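A sketch following the preprocessing recommendation in the createSuperpixelLSC documentation above (slight Gaussian blur plus CIELAB conversion); the region size and iteration count are example values.

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/ximgproc/lsc.hpp>

using namespace cv;

int main()
{
    Mat bgr = imread("scene.png");  // placeholder input
    if (bgr.empty()) return 1;

    // Recommended preprocessing: small Gaussian blur and conversion to CIELAB.
    Mat blurred, lab;
    GaussianBlur(bgr, blurred, Size(3, 3), 0);
    cvtColor(blurred, lab, COLOR_BGR2Lab);

    // region_size = 20 is an example; ratio keeps its default.
    Ptr<ximgproc::SuperpixelLSC> lsc = ximgproc::createSuperpixelLSC(lab, 20);
    lsc->iterate(10);
    lsc->enforceLabelConnectivity();

    Mat labels, contours;
    lsc->getLabels(labels);
    lsc->getLabelContourMask(contours, true);
    return 0;
}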
diff --git a/thirdparty/linux/include/opencv2/ximgproc/paillou_filter.hpp b/thirdparty/linux/include/opencv2/ximgproc/paillou_filter.hpp
new file mode 100644
index 0000000..03754a1
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/paillou_filter.hpp
@@ -0,0 +1,67 @@
+/*
+ * By downloading, copying, installing or using the software you agree to this license.
+ * If you do not agree to this license, do not download, install,
+ * copy or use the software.
+ *
+ *
+ * License Agreement
+ * For Open Source Computer Vision Library
+ * (3 - clause BSD License)
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met :
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and / or other materials provided with the distribution.
+ *
+ * * Neither the names of the copyright holders nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * This software is provided by the copyright holders and contributors "as is" and
+ * any express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are disclaimed.
+ * In no event shall copyright holders or contributors be liable for any direct,
+ * indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused
+ * and on any theory of liability, whether in contract, strict liability,
+ * or tort(including negligence or otherwise) arising in any way out of
+ * the use of this software, even if advised of the possibility of such damage.
+ */
+
+#ifndef __OPENCV_PAILLOUFILTER_HPP__
+#define __OPENCV_PAILLOUFILTER_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv {
+namespace ximgproc {
+
+//! @addtogroup ximgproc_filters
+//! @{
+
+/**
+* @brief Applies Paillou filter to an image.
+*
+* For more details about this implementation, please see @cite paillou1997detecting
+*
+* @param op Source CV_8U(S) or CV_16U(S), 1-channel or 3-channel image.
+* @param _dst Result CV_32F image with the same number of channels as op.
+* @param omega double, see the paper for details
+* @param alpha double, see the paper for details
+*
+* @sa GradientPaillouX, GradientPaillouY
+*/
+CV_EXPORTS void GradientPaillouY(InputArray op, OutputArray _dst, double alpha, double omega);
+CV_EXPORTS void GradientPaillouX(InputArray op, OutputArray _dst, double alpha, double omega);
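+
+/* A minimal usage sketch (assuming the standard OpenCV 3.x core and imgcodecs modules and that this
+ * header is available as <opencv2/ximgproc/paillou_filter.hpp>; the path and the alpha/omega values
+ * are placeholders, not recommended settings):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/ximgproc/paillou_filter.hpp>
+ *
+ *     void paillouExample()
+ *     {
+ *         cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // CV_8UC1 input
+ *         cv::Mat gx, gy, mag;
+ *         cv::ximgproc::GradientPaillouX(img, gx, 1.0, 0.1);           // CV_32F horizontal gradient
+ *         cv::ximgproc::GradientPaillouY(img, gy, 1.0, 0.1);           // CV_32F vertical gradient
+ *         cv::magnitude(gx, gy, mag);                                  // combined gradient magnitude
+ *     }
+ * @endcode
+ */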
+
+}
+}
+#endif
+#endif
diff --git a/thirdparty/linux/include/opencv2/ximgproc/seeds.hpp b/thirdparty/linux/include/opencv2/ximgproc/seeds.hpp
new file mode 100644
index 0000000..4db8b8f
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/seeds.hpp
@@ -0,0 +1,183 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2014, Beat Kueng (beat-kueng@gmx.net), Lukas Vogel, Morten Lysgaard
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_SEEDS_HPP__
+#define __OPENCV_SEEDS_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+//! @addtogroup ximgproc_superpixel
+//! @{
+
+/** @brief Class implementing the SEEDS (Superpixels Extracted via Energy-Driven Sampling) superpixels
+algorithm described in @cite VBRV14 .
+
+The algorithm uses an efficient hill-climbing algorithm to optimize the superpixels' energy
+function that is based on color histograms and a boundary term, which is optional. The energy
+function encourages superpixels to be of the same color, and if the boundary term is activated, the
+superpixels have smooth boundaries and are of similar shape. In practice it starts from a regular
+grid of superpixels and moves the pixels or blocks of pixels at the boundaries to refine the
+solution. The algorithm runs in real-time using a single CPU.
+ */
+class CV_EXPORTS_W SuperpixelSEEDS : public Algorithm
+{
+public:
+
+ /** @brief Calculates the actual amount of superpixels on a given segmentation computed
+ and stored in the SuperpixelSEEDS object.
+ */
+ CV_WRAP virtual int getNumberOfSuperpixels() = 0;
+
+ /** @brief Calculates the superpixel segmentation on a given image with the initialized
+ parameters in the SuperpixelSEEDS object.
+
+ This function can be called again for other images without the need of initializing the
+ algorithm with createSuperpixelSEEDS(). This saves the computational cost of allocating memory
+ for all the structures of the algorithm.
+
+ @param img Input image. Supported formats: CV_8U, CV_16U, CV_32F. Image size & number of
+ channels must match with the initialized image size & channels with the function
+ createSuperpixelSEEDS(). It should be in HSV or Lab color space. Lab is a bit better, but also
+ slower.
+
+ @param num_iterations Number of pixel level iterations. Higher number improves the result.
+
+ The function computes the superpixels segmentation of an image with the parameters initialized
+ with the function createSuperpixelSEEDS(). The algorithm starts from a grid of superpixels and
+ then refines the boundaries by proposing updates of blocks of pixels that lie at the boundaries
+ from large to smaller size, finalizing with proposing pixel updates. An illustrative example
+ can be seen below.
+
+ ![image](pics/superpixels_blocks2.png)
+ */
+ CV_WRAP virtual void iterate(InputArray img, int num_iterations=4) = 0;
+
+ /** @brief Returns the segmentation labeling of the image.
+
+ Each label represents a superpixel, and each pixel is assigned to one superpixel label.
+
+ @param labels_out Return: A CV_32UC1 integer array containing the labels of the superpixel
+ segmentation. The labels are in the range [0, getNumberOfSuperpixels()].
+
+ The function returns an image with the labels of the superpixel segmentation. The labels are in
+ the range [0, getNumberOfSuperpixels()].
+ */
+ CV_WRAP virtual void getLabels(OutputArray labels_out) = 0;
+
+ /** @brief Returns the mask of the superpixel segmentation stored in SuperpixelSEEDS object.
+
+ @param image Return: CV_8UC1 image mask where -1 indicates that the pixel is a superpixel border,
+ and 0 otherwise.
+
+ @param thick_line If false, the border is only one pixel wide, otherwise all pixels at the border
+ are masked.
+
+ The function returns the boundaries of the superpixel segmentation.
+
+ @note
+ - (Python) A demo on how to generate superpixels in images from the webcam can be found at
+ opencv_source_code/samples/python2/seeds.py
+ - (cpp) A demo on how to generate superpixels in images from the webcam can be found at
+ opencv_source_code/modules/ximgproc/samples/seeds.cpp. By adding a file image as a command
+ line argument, the static image will be used instead of the webcam.
+ - It will show a window with the video from the webcam with the superpixel boundaries marked
+ in red (see below). Use Space to switch between different output modes. At the top of the
+ window there are 4 sliders, from which the user can change on-the-fly the number of
+ superpixels, the number of block levels, the strength of the boundary prior term to modify
+ the shape, and the number of iterations at pixel level. This is useful to play with the
+ parameters and set them to the user's convenience. In the console the frame-rate of the
+ algorithm is indicated.
+
+ ![image](pics/superpixels_demo.png)
+ */
+ CV_WRAP virtual void getLabelContourMask(OutputArray image, bool thick_line = false) = 0;
+
+ virtual ~SuperpixelSEEDS() {}
+};
+
+/** @brief Initializes a SuperpixelSEEDS object.
+
+@param image_width Image width.
+@param image_height Image height.
+@param image_channels Number of channels of the image.
+@param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
+due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
+get the actual number.
+@param num_levels Number of block levels. The more levels, the more accurate is the segmentation,
+but needs more memory and CPU time.
+@param prior enable 3x3 shape smoothing term if \>0. A larger value leads to smoother shapes. prior
+must be in the range [0, 5].
+@param histogram_bins Number of histogram bins.
+@param double_step If true, iterate each block level twice for higher accuracy.
+
+The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
+the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
+superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
+double_step.
+
+The number of levels in num_levels defines the number of block levels that the algorithm uses in the
+optimization. The initialization is a grid, in which the superpixels are equally distributed through
+the width and the height of the image. The larger blocks correspond to the superpixel size, and the
+levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
+recursively down to the smallest block level. An example of an initialization with 4 block levels is
+illustrated in the following figure.
+
+![image](pics/superpixels_blocks.png)
+ */
+CV_EXPORTS_W Ptr<SuperpixelSEEDS> createSuperpixelSEEDS(
+ int image_width, int image_height, int image_channels,
+ int num_superpixels, int num_levels, int prior = 2,
+ int histogram_bins=5, bool double_step = false);
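+
+/* A minimal usage sketch (assuming the standard OpenCV 3.x core, imgproc and imgcodecs modules and
+ * that this header is available as <opencv2/ximgproc/seeds.hpp>; "input.png" and the parameter
+ * values are placeholders):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/imgproc.hpp>
+ *     #include <opencv2/ximgproc/seeds.hpp>
+ *
+ *     void seedsExample()
+ *     {
+ *         cv::Mat bgr = cv::imread("input.png");
+ *         cv::Mat hsv;
+ *         cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);             // HSV (or Lab), as recommended above
+ *         cv::Ptr<cv::ximgproc::SuperpixelSEEDS> seeds =
+ *             cv::ximgproc::createSuperpixelSEEDS(hsv.cols, hsv.rows, hsv.channels(),
+ *                                                 400, 4, 2, 5, false);
+ *         seeds->iterate(hsv, 4);                                // pixel-level refinement iterations
+ *         cv::Mat labels, mask;
+ *         seeds->getLabels(labels);                              // label map
+ *         seeds->getLabelContourMask(mask, false);               // one-pixel-wide boundaries
+ *     }
+ * @endcode
+ */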
+
+//! @}
+
+}
+}
+#endif
+#endif
diff --git a/thirdparty/linux/include/opencv2/ximgproc/segmentation.hpp b/thirdparty/linux/include/opencv2/ximgproc/segmentation.hpp
new file mode 100644
index 0000000..02d28bf
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/segmentation.hpp
@@ -0,0 +1,252 @@
+/*
+By downloading, copying, installing or using the software you agree to this
+license. If you do not agree to this license, do not download, install,
+copy or use the software.
+ License Agreement
+ For Open Source Computer Vision Library
+ (3-clause BSD License)
+Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+Third party copyrights are property of their respective owners.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of the contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+This software is provided by the copyright holders and contributors "as is" and
+any express or implied warranties, including, but not limited to, the implied
+warranties of merchantability and fitness for a particular purpose are
+disclaimed. In no event shall copyright holders or contributors be liable for
+any direct, indirect, incidental, special, exemplary, or consequential damages
+(including, but not limited to, procurement of substitute goods or services;
+loss of use, data, or profits; or business interruption) however caused
+and on any theory of liability, whether in contract, strict liability,
+or tort (including negligence or otherwise) arising in any way out of
+the use of this software, even if advised of the possibility of such damage.
+*/
+
+#ifndef __OPENCV_XIMGPROC_SEGMENTATION_HPP__
+#define __OPENCV_XIMGPROC_SEGMENTATION_HPP__
+
+#include <opencv2/core.hpp>
+
+namespace cv {
+ namespace ximgproc {
+ namespace segmentation {
+ //! @addtogroup ximgproc_segmentation
+ //! @{
+
+ /** @brief Graph Based Segmentation Algorithm.
+ The class implements the algorithm described in @cite PFF2004 .
+ */
+ class CV_EXPORTS_W GraphSegmentation : public Algorithm {
+ public:
+ /** @brief Segment an image and store output in dst
+ @param src The input image. Any number of channels (1 (e.g. grayscale), 3 (e.g. RGB), 4 (e.g. RGB-D)) can be provided
+ @param dst The output segmentation. It is a CV_32SC1 Mat with the same number of rows and columns as the input image, containing a unique, sequential id for each pixel.
+ */
+ CV_WRAP virtual void processImage(InputArray src, OutputArray dst) = 0;
+
+ CV_WRAP virtual void setSigma(double sigma) = 0;
+ CV_WRAP virtual double getSigma() = 0;
+
+ CV_WRAP virtual void setK(float k) = 0;
+ CV_WRAP virtual float getK() = 0;
+
+ CV_WRAP virtual void setMinSize(int min_size) = 0;
+ CV_WRAP virtual int getMinSize() = 0;
+ };
+
+ /** @brief Creates a graph based segmentor
+ @param sigma The sigma parameter, used to smooth the image
+ @param k The k parameter of the algorithm
+ @param min_size The minimum size of segments
+ */
+ CV_EXPORTS_W Ptr<GraphSegmentation> createGraphSegmentation(double sigma=0.5, float k=300, int min_size=100);
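+
+ /* A minimal usage sketch (assuming the standard OpenCV 3.x core and imgcodecs modules and that this
+ * header is available as <opencv2/ximgproc/segmentation.hpp>; "input.png" is a placeholder path):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/ximgproc/segmentation.hpp>
+ *
+ *     void graphSegmentationExample()
+ *     {
+ *         cv::Mat img = cv::imread("input.png");
+ *         cv::Ptr<cv::ximgproc::segmentation::GraphSegmentation> gs =
+ *             cv::ximgproc::segmentation::createGraphSegmentation(0.5, 300.0f, 100);
+ *         cv::Mat segments;                            // CV_32SC1, one sequential id per pixel
+ *         gs->processImage(img, segments);
+ *     }
+ * @endcode
+ */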
+
+ /** @brief Strategy for the selective search segmentation algorithm
+ The class implements a generic strategy for the algorithm described in @cite uijlings2013selective.
+ */
+ class CV_EXPORTS_W SelectiveSearchSegmentationStrategy : public Algorithm {
+ public:
+ /** @brief Set the initial image, with a segmentation.
+ @param img The input image. Any number of channels can be provided
+ @param regions A segmentation of the image. The parameter must be the same size as img.
+ @param sizes The sizes of the different regions
+ @param image_id If not set to -1, try to cache pre-computations. If the same set of (img, regions, sizes) is used, the image_id needs to be the same.
+ */
+ CV_WRAP virtual void setImage(InputArray img, InputArray regions, InputArray sizes, int image_id = -1) = 0;
+
+ /** @brief Return the score between two regions (between 0 and 1)
+ @param r1 The first region
+ @param r2 The second region
+ */
+ CV_WRAP virtual float get(int r1, int r2) = 0;
+
+ /** @brief Inform the strategy that two regions will be merged
+ @param r1 The first region
+ @param r2 The second region
+ */
+ CV_WRAP virtual void merge(int r1, int r2) = 0;
+ };
+
+ /** @brief Color-based strategy for the selective search segmentation algorithm
+ The class is implemented from the algorithm described in @cite uijlings2013selective.
+ */
+ class CV_EXPORTS_W SelectiveSearchSegmentationStrategyColor : public SelectiveSearchSegmentationStrategy {
+ };
+
+ /** @brief Create a new color-based strategy */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyColor> createSelectiveSearchSegmentationStrategyColor();
+
+ /** @brief Size-based strategy for the selective search segmentation algorithm
+ The class is implemented from the algorithm described in @cite uijlings2013selective.
+ */
+ class CV_EXPORTS_W SelectiveSearchSegmentationStrategySize : public SelectiveSearchSegmentationStrategy {
+ };
+
+ /** @brief Create a new size-based strategy */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategySize> createSelectiveSearchSegmentationStrategySize();
+
+ /** @brief Texture-based strategy for the selective search segmentation algorithm
+ The class is implemented from the algorithm described in @cite uijlings2013selective.
+ */
+ class CV_EXPORTS_W SelectiveSearchSegmentationStrategyTexture : public SelectiveSearchSegmentationStrategy {
+ };
+
+ /** @brief Create a new texture-based strategy */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyTexture> createSelectiveSearchSegmentationStrategyTexture();
+
+ /** @brief Fill-based strategy for the selective search segmentation algorithm
+ The class is implemented from the algorithm described in @cite uijlings2013selective.
+ */
+ class CV_EXPORTS_W SelectiveSearchSegmentationStrategyFill : public SelectiveSearchSegmentationStrategy {
+ };
+
+ /** @brief Create a new fill-based strategy */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyFill> createSelectiveSearchSegmentationStrategyFill();
+
+ /** @brief Regroup multiple strategies for the selective search segmentation algorithm
+ */
+ class CV_EXPORTS_W SelectiveSearchSegmentationStrategyMultiple : public SelectiveSearchSegmentationStrategy {
+ public:
+
+ /** @brief Add a new sub-strategy
+ @param g The strategy
+ @param weight The weight of the strategy
+ */
+ CV_WRAP virtual void addStrategy(Ptr<SelectiveSearchSegmentationStrategy> g, float weight) = 0;
+ /** @brief Remove all sub-strategies
+ */
+ CV_WRAP virtual void clearStrategies() = 0;
+ };
+
+ /** @brief Create a new multiple strategy */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyMultiple> createSelectiveSearchSegmentationStrategyMultiple();
+
+ /** @brief Create a new multiple strategy and set one sub-strategy
+ @param s1 The first strategy
+ */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyMultiple> createSelectiveSearchSegmentationStrategyMultiple(Ptr<SelectiveSearchSegmentationStrategy> s1);
+
+ /** @brief Create a new multiple strategy and set two sub-strategies, with equal weights
+ @param s1 The first strategy
+ @param s2 The second strategy
+ */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyMultiple> createSelectiveSearchSegmentationStrategyMultiple(Ptr<SelectiveSearchSegmentationStrategy> s1, Ptr<SelectiveSearchSegmentationStrategy> s2);
+
+
+ /** @brief Create a new multiple strategy and set three sub-strategies, with equal weights
+ @param s1 The first strategy
+ @param s2 The second strategy
+ @param s3 The third strategy
+ */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyMultiple> createSelectiveSearchSegmentationStrategyMultiple(Ptr<SelectiveSearchSegmentationStrategy> s1, Ptr<SelectiveSearchSegmentationStrategy> s2, Ptr<SelectiveSearchSegmentationStrategy> s3);
+
+ /** @brief Create a new multiple strategy and set four sub-strategies, with equal weights
+ @param s1 The first strategy
+ @param s2 The second strategy
+ @param s3 The third strategy
+ @param s4 The fourth strategy
+ */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentationStrategyMultiple> createSelectiveSearchSegmentationStrategyMultiple(Ptr<SelectiveSearchSegmentationStrategy> s1, Ptr<SelectiveSearchSegmentationStrategy> s2, Ptr<SelectiveSearchSegmentationStrategy> s3, Ptr<SelectiveSearchSegmentationStrategy> s4);
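+
+ /* A minimal sketch of combining two strategies with equal weights (assuming this header is available
+ * as <opencv2/ximgproc/segmentation.hpp>); the resulting strategy can then be passed to
+ * SelectiveSearchSegmentation::addStrategy():
+ * @code
+ *     #include <opencv2/ximgproc/segmentation.hpp>
+ *
+ *     void strategyExample()
+ *     {
+ *         using namespace cv::ximgproc::segmentation;
+ *         cv::Ptr<SelectiveSearchSegmentationStrategyMultiple> strategy =
+ *             createSelectiveSearchSegmentationStrategyMultiple(
+ *                 createSelectiveSearchSegmentationStrategyColor(),   // color similarity
+ *                 createSelectiveSearchSegmentationStrategyFill());   // fill similarity
+ *     }
+ * @endcode
+ */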
+
+ /** @brief Selective search segmentation algorithm
+ The class implements the algorithm described in @cite uijlings2013selective.
+ */
+ class CV_EXPORTS_W SelectiveSearchSegmentation : public Algorithm {
+ public:
+
+ /** @brief Set an image used by the switch* functions to initialize the class
+ @param img The image
+ */
+ CV_WRAP virtual void setBaseImage(InputArray img) = 0;
+
+ /** @brief Initialize the class with the 'Single strategy' parameters described in @cite uijlings2013selective.
+ @param k The k parameter for the graph segmentation
+ @param sigma The sigma parameter for the graph segmentation
+ */
+ CV_WRAP virtual void switchToSingleStrategy(int k = 200, float sigma = 0.8f) = 0;
+
+ /** @brief Initialize the class with the 'Selective search fast' parameters described in @cite uijlings2013selective.
+ @param base_k The k parameter for the first graph segmentation
+ @param inc_k The increment of the k parameter for all graph segmentations
+ @param sigma The sigma parameter for the graph segmentation
+ */
+ CV_WRAP virtual void switchToSelectiveSearchFast(int base_k = 150, int inc_k = 150, float sigma = 0.8f) = 0;
+
+ /** @brief Initialize the class with the 'Selective search quality' parameters described in @cite uijlings2013selective.
+ @param base_k The k parameter for the first graph segmentation
+ @param inc_k The increment of the k parameter for all graph segmentations
+ @param sigma The sigma parameter for the graph segmentation
+ */
+ CV_WRAP virtual void switchToSelectiveSearchQuality(int base_k = 150, int inc_k = 150, float sigma = 0.8f) = 0;
+
+ /** @brief Add a new image in the list of images to process.
+ @param img The image
+ */
+ CV_WRAP virtual void addImage(InputArray img) = 0;
+
+ /** @brief Clear the list of images to process
+ */
+ CV_WRAP virtual void clearImages() = 0;
+
+ /** @brief Add a new graph segmentation to the list of graph segmentations to process.
+ @param g The graph segmentation
+ */
+ CV_WRAP virtual void addGraphSegmentation(Ptr<GraphSegmentation> g) = 0;
+
+ /** @brief Clear the list of graph segmentations to process.
+ */
+ CV_WRAP virtual void clearGraphSegmentations() = 0;
+
+ /** @brief Add a new strategy to the list of strategies to process.
+ @param s The strategy
+ */
+ CV_WRAP virtual void addStrategy(Ptr<SelectiveSearchSegmentationStrategy> s) = 0;
+
+ /** @brief Clear the list of strategies to process.
+ */
+ CV_WRAP virtual void clearStrategies() = 0;
+
+ /** @brief Based on all images, graph segmentations and strategies, computes all possible rects and returns them
+ @param rects The list of rects. The first ones are more relevant than the last ones.
+ */
+ CV_WRAP virtual void process(std::vector<Rect>& rects) = 0;
+ };
+
+ /** @brief Create a new SelectiveSearchSegmentation class.
+ */
+ CV_EXPORTS_W Ptr<SelectiveSearchSegmentation> createSelectiveSearchSegmentation();
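+
+ /* A minimal usage sketch of the full pipeline (assuming the standard OpenCV 3.x core and imgcodecs
+ * modules and that this header is available as <opencv2/ximgproc/segmentation.hpp>; "input.png" is a
+ * placeholder path):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/ximgproc/segmentation.hpp>
+ *
+ *     void selectiveSearchExample()
+ *     {
+ *         cv::Mat img = cv::imread("input.png");
+ *         cv::Ptr<cv::ximgproc::segmentation::SelectiveSearchSegmentation> ss =
+ *             cv::ximgproc::segmentation::createSelectiveSearchSegmentation();
+ *         ss->setBaseImage(img);                       // image used by the switch* functions
+ *         ss->switchToSelectiveSearchFast();           // or switchToSingleStrategy() / switchToSelectiveSearchQuality()
+ *         std::vector<cv::Rect> rects;
+ *         ss->process(rects);                          // candidate locations, most relevant first
+ *     }
+ * @endcode
+ */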
+
+ //! @}
+
+ }
+ }
+}
+
+#endif
diff --git a/thirdparty/linux/include/opencv2/ximgproc/slic.hpp b/thirdparty/linux/include/opencv2/ximgproc/slic.hpp
new file mode 100644
index 0000000..8b99a65
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/slic.hpp
@@ -0,0 +1,168 @@
+/*********************************************************************
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2013
+ * Radhakrishna Achanta
+ * email : Radhakrishna [dot] Achanta [at] epfl [dot] ch
+ * web : http://ivrl.epfl.ch/people/achanta
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *********************************************************************/
+
+/*
+ "SLIC Superpixels Compared to State-of-the-art Superpixel Methods"
+ Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua,
+ and Sabine Susstrunk, IEEE TPAMI, Volume 34, Issue 11, Pages 2274-2282,
+ November 2012.
+
+ "SLIC Superpixels" Radhakrishna Achanta, Appu Shaji, Kevin Smith,
+ Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk, EPFL Technical
+ Report no. 149300, June 2010.
+
+ OpenCV port by: Cristian Balint <cristian dot balint at gmail dot com>
+ */
+
+#ifndef __OPENCV_SLIC_HPP__
+#define __OPENCV_SLIC_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+//! @addtogroup ximgproc_superpixel
+//! @{
+
+ enum SLIC { SLIC = 100, SLICO = 101, MSLIC = 102 };
+
+/** @brief Class implementing the SLIC (Simple Linear Iterative Clustering) superpixels
+algorithm described in @cite Achanta2012.
+
+SLIC (Simple Linear Iterative Clustering) clusters pixels using pixel channels and image plane space
+to efficiently generate compact, nearly uniform superpixels. The simplicity of the approach makes it
+extremely easy to use: a lone parameter specifies the number of superpixels, and the efficiency of
+the algorithm makes it very practical.
+Several optimizations are available for the SLIC class:
+SLICO stands for "Zero parameter SLIC" and it is an optimization of baseline SLIC described in @cite Achanta2012.
+MSLIC stands for "Manifold SLIC" and it is an optimization of baseline SLIC described in @cite Liu_2016_CVPR.
+ */
+
+class CV_EXPORTS_W SuperpixelSLIC : public Algorithm
+{
+public:
+
+ /** @brief Calculates the actual amount of superpixels on a given segmentation computed
+ and stored in SuperpixelSLIC object.
+ */
+ CV_WRAP virtual int getNumberOfSuperpixels() const = 0;
+
+ /** @brief Calculates the superpixel segmentation on a given image with the initialized
+ parameters in the SuperpixelSLIC object.
+
+ This function can be called again without the need of initializing the algorithm with
+ createSuperpixelSLIC(). This saves the computational cost of allocating memory for all the
+ structures of the algorithm.
+
+ @param num_iterations Number of iterations. Higher number improves the result.
+
+ The function computes the superpixels segmentation of an image with the parameters initialized
+ with the function createSuperpixelSLIC(). The algorithm starts from a grid of superpixels and
+ then refines the boundaries by proposing updates of edge boundaries.
+
+ */
+ CV_WRAP virtual void iterate( int num_iterations = 10 ) = 0;
+
+ /** @brief Returns the segmentation labeling of the image.
+
+ Each label represents a superpixel, and each pixel is assigned to one superpixel label.
+
+ @param labels_out Return: A CV_32SC1 integer array containing the labels of the superpixel
+ segmentation. The labels are in the range [0, getNumberOfSuperpixels()].
+
+ The function returns an image with the labels of the superpixel segmentation. The labels are in
+ the range [0, getNumberOfSuperpixels()].
+ */
+ CV_WRAP virtual void getLabels( OutputArray labels_out ) const = 0;
+
+ /** @brief Returns the mask of the superpixel segmentation stored in SuperpixelSLIC object.
+
+ @param image Return: CV_8UC1 image mask where -1 indicates that the pixel is a superpixel border,
+ and 0 otherwise.
+
+ @param thick_line If false, the border is only one pixel wide, otherwise all pixels at the border
+ are masked.
+
+ The function returns the boundaries of the superpixel segmentation.
+ */
+ CV_WRAP virtual void getLabelContourMask( OutputArray image, bool thick_line = true ) const = 0;
+
+ /** @brief Enforce label connectivity.
+
+ @param min_element_size The minimum element size, in percent of the resulting average superpixel
+ size, that should be absorbed into a bigger superpixel. Valid values are in the 0-100 range; 25 means
+ that superpixels smaller than a quarter of the average size are absorbed (this is the default).
+
+ The function merges components that are too small, assigning them the previously found adjacent
+ label. Calling this function may change the final number of superpixels.
+ */
+ CV_WRAP virtual void enforceLabelConnectivity( int min_element_size = 25 ) = 0;
+
+
+};
+
+/** @brief Initialize a SuperpixelSLIC object
+
+@param image Image to segment
+@param algorithm Chooses the algorithm variant to use:
+SLIC segments the image using a desired region_size, and in addition SLICO will optimize using an adaptive compactness factor,
+while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.
+@param region_size Chooses an average superpixel size measured in pixels
+@param ruler Chooses the enforcement of superpixel smoothness factor of superpixel
+
+The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
+superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
+computing iterations over the given image. For enhanced results on color images, it is recommended to
+preprocess the image with a light Gaussian blur using a small 3 x 3 kernel and to convert it into the
+CIELAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
+
+![image](pics/superpixels_slic.png)
+
+ */
+
+ CV_EXPORTS_W Ptr<SuperpixelSLIC> createSuperpixelSLIC( InputArray image, int algorithm = SLICO,
+ int region_size = 10, float ruler = 10.0f );
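+
+ /* A minimal usage sketch (assuming the standard OpenCV 3.x core, imgproc and imgcodecs modules and
+ * that this header is available as <opencv2/ximgproc/slic.hpp>; "input.png" is a placeholder path):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/imgproc.hpp>
+ *     #include <opencv2/ximgproc/slic.hpp>
+ *
+ *     void slicExample()
+ *     {
+ *         cv::Mat bgr = cv::imread("input.png");
+ *         cv::GaussianBlur(bgr, bgr, cv::Size(3, 3), 0);         // light blur, as recommended above
+ *         cv::Mat lab;
+ *         cv::cvtColor(bgr, lab, cv::COLOR_BGR2Lab);             // CIELAB conversion
+ *         cv::Ptr<cv::ximgproc::SuperpixelSLIC> slic =
+ *             cv::ximgproc::createSuperpixelSLIC(lab, cv::ximgproc::SLICO, 10, 10.0f);
+ *         slic->iterate(10);
+ *         slic->enforceLabelConnectivity(25);                    // absorb too-small superpixels
+ *         cv::Mat labels, mask;
+ *         slic->getLabels(labels);                               // CV_32SC1 label map
+ *         slic->getLabelContourMask(mask, true);                 // superpixel boundary mask
+ *     }
+ * @endcode
+ */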
+
+//! @}
+
+}
+}
+#endif
+#endif
diff --git a/thirdparty/linux/include/opencv2/ximgproc/sparse_match_interpolator.hpp b/thirdparty/linux/include/opencv2/ximgproc/sparse_match_interpolator.hpp
new file mode 100644
index 0000000..fffcfd6
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/sparse_match_interpolator.hpp
@@ -0,0 +1,132 @@
+/*
+ * By downloading, copying, installing or using the software you agree to this license.
+ * If you do not agree to this license, do not download, install,
+ * copy or use the software.
+ *
+ *
+ * License Agreement
+ * For Open Source Computer Vision Library
+ * (3 - clause BSD License)
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met :
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and / or other materials provided with the distribution.
+ *
+ * * Neither the names of the copyright holders nor the names of the contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * This software is provided by the copyright holders and contributors "as is" and
+ * any express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are disclaimed.
+ * In no event shall copyright holders or contributors be liable for any direct,
+ * indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused
+ * and on any theory of liability, whether in contract, strict liability,
+ * or tort(including negligence or otherwise) arising in any way out of
+ * the use of this software, even if advised of the possibility of such damage.
+ */
+
+#ifndef __OPENCV_SPARSEMATCHINTERPOLATOR_HPP__
+#define __OPENCV_SPARSEMATCHINTERPOLATOR_HPP__
+#ifdef __cplusplus
+
+#include <opencv2/core.hpp>
+
+namespace cv {
+namespace ximgproc {
+
+//! @addtogroup ximgproc_filters
+//! @{
+
+/** @brief Main interface for all filters that take sparse matches as an
+input and produce a dense per-pixel matching (optical flow) as an output.
+ */
+class CV_EXPORTS_W SparseMatchInterpolator : public Algorithm
+{
+public:
+ /** @brief Interpolate input sparse matches.
+
+ @param from_image first of the two matched images, 8-bit single-channel or three-channel.
+
+ @param from_points points of the from_image for which there are correspondences in the
+ to_image (Point2f vector, size shouldn't exceed 32767)
+
+ @param to_image second of the two matched images, 8-bit single-channel or three-channel.
+
+ @param to_points points in the to_image corresponding to from_points
+ (Point2f vector, size shouldn't exceed 32767)
+
+ @param dense_flow output dense matching (two-channel CV_32F image)
+ */
+ CV_WRAP virtual void interpolate(InputArray from_image, InputArray from_points,
+ InputArray to_image , InputArray to_points,
+ OutputArray dense_flow) = 0;
+};
+
+/** @brief Sparse match interpolation algorithm based on modified locally-weighted affine
+estimator from @cite Revaud2015 and Fast Global Smoother as post-processing filter.
+ */
+class CV_EXPORTS_W EdgeAwareInterpolator : public SparseMatchInterpolator
+{
+public:
+ /** @brief K is the number of nearest-neighbor matches considered when fitting a locally affine
+ model. Usually it should be around 128. However, lower values would make the interpolation
+ noticeably faster.
+ */
+ CV_WRAP virtual void setK(int _k) = 0;
+ /** @see setK */
+ CV_WRAP virtual int getK() = 0;
+
+ /** @brief Sigma is a parameter defining how fast the weights decrease in the locally-weighted affine
+ fitting. Higher values can help preserve fine details, lower values can help to get rid of noise in the
+ output flow.
+ */
+ CV_WRAP virtual void setSigma(float _sigma) = 0;
+ /** @see setSigma */
+ CV_WRAP virtual float getSigma() = 0;
+
+ /** @brief Lambda is a parameter defining the weight of the edge-aware term in geodesic distance,
+ should be in the range of 0 to 1000.
+ */
+ CV_WRAP virtual void setLambda(float _lambda) = 0;
+ /** @see setLambda */
+ CV_WRAP virtual float getLambda() = 0;
+
+ /** @brief Sets whether the fastGlobalSmootherFilter() post-processing is employed. It is turned on by
+ default.
+ */
+ CV_WRAP virtual void setUsePostProcessing(bool _use_post_proc) = 0;
+ /** @see setUsePostProcessing */
+ CV_WRAP virtual bool getUsePostProcessing() = 0;
+
+ /** @brief Sets the respective fastGlobalSmootherFilter() parameter.
+ */
+ CV_WRAP virtual void setFGSLambda(float _lambda) = 0;
+ /** @see setFGSLambda */
+ CV_WRAP virtual float getFGSLambda() = 0;
+
+ /** @see setFGSLambda */
+ CV_WRAP virtual void setFGSSigma(float _sigma) = 0;
+ /** @see setFGSSigma */
+ CV_WRAP virtual float getFGSSigma() = 0;
+};
+
+/** @brief Factory method that creates an instance of the
+EdgeAwareInterpolator.
+*/
+CV_EXPORTS_W
+Ptr<EdgeAwareInterpolator> createEdgeAwareInterpolator();
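+
+/* A minimal usage sketch (assuming the standard OpenCV 3.x core and imgcodecs modules and that this
+ * header is available as <opencv2/ximgproc/sparse_match_interpolator.hpp>; the image paths are
+ * placeholders and the sparse matches are expected to come from a feature matcher, not shown here):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/ximgproc/sparse_match_interpolator.hpp>
+ *
+ *     void interpolatorExample()
+ *     {
+ *         cv::Mat from_img = cv::imread("frame0.png", cv::IMREAD_GRAYSCALE);
+ *         cv::Mat to_img   = cv::imread("frame1.png", cv::IMREAD_GRAYSCALE);
+ *         std::vector<cv::Point2f> from_pts, to_pts;   // fill with matched points (size <= 32767)
+ *         cv::Ptr<cv::ximgproc::EdgeAwareInterpolator> interp =
+ *             cv::ximgproc::createEdgeAwareInterpolator();
+ *         interp->setK(128);                           // number of nearest-neighbor matches
+ *         cv::Mat dense_flow;                          // output: two-channel CV_32F flow field
+ *         interp->interpolate(from_img, from_pts, to_img, to_pts, dense_flow);
+ *     }
+ * @endcode
+ */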
+
+//! @}
+}
+}
+#endif
+#endif
diff --git a/thirdparty/linux/include/opencv2/ximgproc/structured_edge_detection.hpp b/thirdparty/linux/include/opencv2/ximgproc/structured_edge_detection.hpp
new file mode 100644
index 0000000..db6e906
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/structured_edge_detection.hpp
@@ -0,0 +1,128 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STRUCTURED_EDGE_DETECTION_HPP__
+#define __OPENCV_STRUCTURED_EDGE_DETECTION_HPP__
+#ifdef __cplusplus
+
+/** @file
+@date Jun 17, 2014
+@author Yury Gitman
+ */
+
+#include <opencv2/core.hpp>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+//! @addtogroup ximgproc_edge
+//! @{
+
+/*!
+ Helper class for training part of [P. Dollar and C. L. Zitnick. Structured Forests for Fast Edge Detection, 2013].
+ */
+class CV_EXPORTS_W RFFeatureGetter : public Algorithm
+{
+public:
+
+ /*!
+ * This function extracts feature channels from src.
+ * StructuredEdgeDetection then uses this feature space
+ * to detect edges.
+ *
+ * \param src : source image to extract features
+ * \param features : output n-channel floating point feature matrix.
+ *
+ * \param gnrmRad : __rf.options.gradientNormalizationRadius
+ * \param gsmthRad : __rf.options.gradientSmoothingRadius
+ * \param shrink : __rf.options.shrinkNumber
+ * \param outNum : __rf.options.numberOfOutputChannels
+ * \param gradNum : __rf.options.numberOfGradientOrientations
+ */
+ CV_WRAP virtual void getFeatures(const Mat &src, Mat &features,
+ const int gnrmRad,
+ const int gsmthRad,
+ const int shrink,
+ const int outNum,
+ const int gradNum) const = 0;
+};
+
+CV_EXPORTS_W Ptr<RFFeatureGetter> createRFFeatureGetter();
+
+
+
+/** @brief Class implementing edge detection algorithm from @cite Dollar2013 :
+ */
+class CV_EXPORTS_W StructuredEdgeDetection : public Algorithm
+{
+public:
+
+ /** @brief The function detects edges in src and draw them to dst.
+
+ The algorithm underlying this function is much more robust to the presence of texture than common
+ approaches, e.g. Sobel
+ @param src source image (RGB, float, in [0;1]) to detect edges
+ @param dst destination image (grayscale, float, in [0;1]) where edges are drawn
+ @sa Sobel, Canny
+ */
+ CV_WRAP virtual void detectEdges(const Mat &src, CV_OUT Mat &dst) const = 0;
+};
+
+/*!
+* The only factory method that creates a StructuredEdgeDetection instance
+*
+* \param model : name of the file where the model is stored
+* \param howToGetFeatures : optional object inheriting from RFFeatureGetter.
+* You need it only if you would like to train your
+* own forest, pass NULL otherwise
+*/
+CV_EXPORTS_W Ptr<StructuredEdgeDetection> createStructuredEdgeDetection(const String &model,
+ Ptr<const RFFeatureGetter> howToGetFeatures = Ptr<RFFeatureGetter>());
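+
+/* A minimal usage sketch (assuming the standard OpenCV 3.x core, imgproc and imgcodecs modules and a
+ * pretrained structured forest model file on disk; "model.yml.gz" and "input.png" are placeholder paths):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/imgproc.hpp>
+ *     #include <opencv2/ximgproc/structured_edge_detection.hpp>
+ *
+ *     void structuredEdgeExample()
+ *     {
+ *         cv::Mat bgr = cv::imread("input.png");
+ *         cv::Mat rgb, rgbf;
+ *         cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGB);             // detectEdges() expects RGB
+ *         rgb.convertTo(rgbf, CV_32FC3, 1.0 / 255.0);            // float image in [0;1]
+ *         cv::Ptr<cv::ximgproc::StructuredEdgeDetection> sed =
+ *             cv::ximgproc::createStructuredEdgeDetection("model.yml.gz");
+ *         cv::Mat edges;                                         // grayscale float edge map in [0;1]
+ *         sed->detectEdges(rgbf, edges);
+ *     }
+ * @endcode
+ */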
+
+//! @}
+
+}
+}
+#endif
+#endif /* __OPENCV_STRUCTURED_EDGE_DETECTION_HPP__ */
diff --git a/thirdparty/linux/include/opencv2/ximgproc/weighted_median_filter.hpp b/thirdparty/linux/include/opencv2/ximgproc/weighted_median_filter.hpp
new file mode 100644
index 0000000..30a169c
--- /dev/null
+++ b/thirdparty/linux/include/opencv2/ximgproc/weighted_median_filter.hpp
@@ -0,0 +1,95 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2015, The Chinese University of Hong Kong, all rights reserved.
+//
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_WEIGHTED_MEDIAN_FILTER_HPP__
+#define __OPENCV_WEIGHTED_MEDIAN_FILTER_HPP__
+#ifdef __cplusplus
+
+/**
+* @file
+* @date Sept 9, 2015
+* @author Zhou Chao
+*/
+
+#include <opencv2/core.hpp>
+#include <string>
+
+namespace cv
+{
+namespace ximgproc
+{
+
+/**
+* @brief Specifies weight types of weighted median filter.
+*/
+enum WMFWeightType
+{
+ WMF_EXP, //!< \f$exp(-|I1-I2|^2/(2*sigma^2))\f$
+ WMF_IV1, //!< \f$(|I1-I2|+sigma)^{-1}\f$
+ WMF_IV2, //!< \f$(|I1-I2|^2+sigma^2)^{-1}\f$
+ WMF_COS, //!< \f$dot(I1,I2)/(|I1|*|I2|)\f$
+ WMF_JAC, //!< \f$(min(r1,r2)+min(g1,g2)+min(b1,b2))/(max(r1,r2)+max(g1,g2)+max(b1,b2))\f$
+ WMF_OFF //!< unweighted
+};
+
+/**
+* @brief Applies weighted median filter to an image.
+*
+* For more details about this implementation, please see @cite zhang2014100+
+*
+* @param joint Joint 8-bit, 1-channel or 3-channel image.
+* @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
+* @param dst Destination image.
+* @param r Radius of filtering kernel, should be a positive integer.
+* @param sigma Filter range standard deviation for the joint image.
+* @param weightType The type of weight definition, see WMFWeightType
+* @param mask A 0-1 mask that has the same size as src. This mask is used to ignore the effect of some pixels. If the pixel value on the mask is 0,
+* the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
+*
+* @sa medianBlur, jointBilateralFilter
+*/
+CV_EXPORTS void weightedMedianFilter(InputArray joint, InputArray src, OutputArray dst, int r, double sigma=25.5, WMFWeightType weightType=WMF_EXP, Mat mask=Mat());
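+
+/* A minimal usage sketch (assuming the standard OpenCV 3.x core and imgcodecs modules and that this
+ * header is available as <opencv2/ximgproc/weighted_median_filter.hpp>; "input.png" is a placeholder
+ * path, and the source image is used as its own joint/guidance image):
+ * @code
+ *     #include <opencv2/imgcodecs.hpp>
+ *     #include <opencv2/ximgproc/weighted_median_filter.hpp>
+ *
+ *     void wmfExample()
+ *     {
+ *         cv::Mat src = cv::imread("input.png");                 // 8-bit, 3-channel image
+ *         cv::Mat dst;
+ *         cv::ximgproc::weightedMedianFilter(src, src, dst, 7);  // r = 7, default sigma and WMF_EXP
+ *     }
+ * @endcode
+ */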
+}
+}
+
+#endif
+#endif