Diffstat (limited to 'thirdparty/raspberrypi/includes/opencv2/video')
3 files changed, 694 insertions, 0 deletions
diff --git a/thirdparty/raspberrypi/includes/opencv2/video/background_segm.hpp b/thirdparty/raspberrypi/includes/opencv2/video/background_segm.hpp new file mode 100644 index 0000000..d2d068c --- /dev/null +++ b/thirdparty/raspberrypi/includes/opencv2/video/background_segm.hpp @@ -0,0 +1,263 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_BACKGROUND_SEGM_HPP__ +#define __OPENCV_BACKGROUND_SEGM_HPP__ + +#include "opencv2/core/core.hpp" +#include <list> +namespace cv +{ + +/*! + The Base Class for Background/Foreground Segmentation + + The class is only used to define the common interface for + the whole family of background/foreground segmentation algorithms. +*/ +class CV_EXPORTS_W BackgroundSubtractor : public Algorithm +{ +public: + //! the virtual destructor + virtual ~BackgroundSubtractor(); + //! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image. + CV_WRAP_AS(apply) virtual void operator()(InputArray image, OutputArray fgmask, + double learningRate=0); + + //! computes a background image + virtual void getBackgroundImage(OutputArray backgroundImage) const; +}; + + +/*! + Gaussian Mixture-based Backbround/Foreground Segmentation Algorithm + + The class implements the following algorithm: + "An improved adaptive background mixture model for real-time tracking with shadow detection" + P. KadewTraKuPong and R. Bowden, + Proc. 
2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001." + http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf + +*/ +class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor +{ +public: + //! the default constructor + CV_WRAP BackgroundSubtractorMOG(); + //! the full constructor that takes the length of the history, the number of Gaussian mixtures, the background ratio parameter and the noise strength + CV_WRAP BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma=0); + //! the destructor + virtual ~BackgroundSubtractorMOG(); + //! the update operator + virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=0); + + //! re-initialization method + virtual void initialize(Size frameSize, int frameType); + + virtual AlgorithmInfo* info() const; + +protected: + Size frameSize; + int frameType; + Mat bgmodel; + int nframes; + int history; + int nmixtures; + double varThreshold; + double backgroundRatio; + double noiseSigma; +}; + + +/*! + The class implements the following algorithm: + "Improved adaptive Gaussian mixture model for background subtraction" + Z. Zivkovic + International Conference Pattern Recognition, UK, August, 2004. + http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf +*/ +class CV_EXPORTS_W BackgroundSubtractorMOG2 : public BackgroundSubtractor +{ +public: + //! the default constructor + CV_WRAP BackgroundSubtractorMOG2(); + //! the full constructor that takes the length of the history, the variance threshold and the shadow detection flag + CV_WRAP BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=true); + //! the destructor + virtual ~BackgroundSubtractorMOG2(); + //! the update operator + virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1); + + //! computes a background image which is the mean of all background Gaussians + virtual void getBackgroundImage(OutputArray backgroundImage) const; + + //! re-initialization method + virtual void initialize(Size frameSize, int frameType); + + virtual AlgorithmInfo* info() const; + +protected: + Size frameSize; + int frameType; + Mat bgmodel; + Mat bgmodelUsedModes;//keep track of number of modes per pixel + int nframes; + int history; + int nmixtures; + //! here it is the maximum allowed number of mixture components. + //! Actual number is determined dynamically per pixel + double varThreshold; + // threshold on the squared Mahalanobis distance to decide if it is well described + // by the background model or not. Related to Cthr from the paper. + // This does not influence the update of the background. A typical value could be 4 sigma + // and that is varThreshold=4*4=16; Corresponds to Tb in the paper. + + ///////////////////////// + // less important parameters - things you might change but be careful + //////////////////////// + float backgroundRatio; + // corresponds to fTB=1-cf from the paper + // TB - threshold when the component becomes significant enough to be included into + // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.9. + // For alpha=0.001 it means that the mode should exist for approximately 105 frames before + // it is considered foreground + // float noiseSigma; + float varThresholdGen; + //corresponds to Tg - threshold on the squared Mahalan. dist. to decide + //when a sample is close to the existing components.
If it is not close +//to any existing component, a new component will be generated. I use 3 sigma => Tg=3*3=9. +//Smaller Tg leads to more generated components and higher Tg might +//lead to a small number of components, but they can grow too large +float fVarInit; +float fVarMin; +float fVarMax; +//initial variance for the newly generated components. +//It will influence the speed of adaptation. A good guess should be made. +//A simple way is to estimate the typical standard deviation from the images. +//I used here 10 as a reasonable value +// min and max can be used to further control the variance +float fCT;//CT - complexity reduction prior +//this is related to the number of samples needed to accept that a component +//actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get +//the standard Stauffer&Grimson algorithm (maybe not exact but very similar) + +//shadow detection parameters +bool bShadowDetection;//default 1 - do shadow detection +unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value +float fTau; +// Tau - shadow threshold. The shadow is detected if the pixel is a darker +//version of the background. Tau is a threshold on how much darker the shadow can be. +//Tau = 0.5 means that if the pixel is more than 2 times darker, then it is not shadow +//See: Prati,Mikic,Trivedi,Cucchiara,"Detecting Moving Shadows...",IEEE PAMI,2003. +}; + +/** + * Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1) + * images of the same size, where 255 indicates Foreground and 0 represents Background. + * This class implements an algorithm described in "Visual Tracking of Human Visitors under + * Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere, + * A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012. + */ +class CV_EXPORTS BackgroundSubtractorGMG: public cv::BackgroundSubtractor +{ +public: + BackgroundSubtractorGMG(); + virtual ~BackgroundSubtractorGMG(); + virtual AlgorithmInfo* info() const; + + /** + * Validate parameters and set up data structures for appropriate image size. + * Must call before running on data. + * @param frameSize input frame size + * @param min minimum value taken on by pixels in image sequence. Usually 0 + * @param max maximum value taken on by pixels in image sequence. e.g. 1.0 or 255 + */ + void initialize(cv::Size frameSize, double min, double max); + + /** + * Performs single-frame background subtraction and builds up a statistical background image + * model. + * @param image Input image + * @param fgmask Output mask image representing foreground and background pixels + * @param learningRate Determines how quickly features are "forgotten" from histograms + */ + virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1.0); + + /** + * Releases all inner buffers. + */ + void release(); + + //! Total number of distinct colors to maintain in histogram. + int maxFeatures; + //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms. + double learningRate; + //! Number of frames of video to use to initialize histograms. + int numInitializationFrames; + //! Number of discrete levels in each channel to be used in histograms. + int quantizationLevels; + //! Prior probability that any given pixel is a background pixel. A sensitivity parameter. + double backgroundPrior; + //! Value above which pixel is determined to be FG.
+ double decisionThreshold; + //! Smoothing radius, in pixels, for cleaning up FG image. + int smoothingRadius; + //! Perform background model update + bool updateBackgroundModel; + +private: + double maxVal_; + double minVal_; + + cv::Size frameSize_; + int frameNum_; + + cv::Mat_<int> nfeatures_; + cv::Mat_<unsigned int> colors_; + cv::Mat_<float> weights_; + + cv::Mat buf_; +}; + +} + +#endif diff --git a/thirdparty/raspberrypi/includes/opencv2/video/tracking.hpp b/thirdparty/raspberrypi/includes/opencv2/video/tracking.hpp new file mode 100644 index 0000000..f09be80 --- /dev/null +++ b/thirdparty/raspberrypi/includes/opencv2/video/tracking.hpp @@ -0,0 +1,373 @@ +/*! \file tracking.hpp + \brief The Object and Feature Tracking + */ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_TRACKING_HPP__ +#define __OPENCV_TRACKING_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Motion Analysis * +\****************************************************************************************/ + +/************************************ optical flow ***************************************/ + +#define CV_LKFLOW_PYR_A_READY 1 +#define CV_LKFLOW_PYR_B_READY 2 +#define CV_LKFLOW_INITIAL_GUESSES 4 +#define CV_LKFLOW_GET_MIN_EIGENVALS 8 + +/* It is Lucas & Kanade method, modified to use pyramids. + Also it does several iterations to get optical flow for + every point at every pyramid level. + Calculates optical flow between two images for certain set of points (i.e. + it is a "sparse" optical flow, which is opposite to the previous 3 methods) */ +CVAPI(void) cvCalcOpticalFlowPyrLK( const CvArr* prev, const CvArr* curr, + CvArr* prev_pyr, CvArr* curr_pyr, + const CvPoint2D32f* prev_features, + CvPoint2D32f* curr_features, + int count, + CvSize win_size, + int level, + char* status, + float* track_error, + CvTermCriteria criteria, + int flags ); + + +/* Modification of a previous sparse optical flow algorithm to calculate + affine flow */ +CVAPI(void) cvCalcAffineFlowPyrLK( const CvArr* prev, const CvArr* curr, + CvArr* prev_pyr, CvArr* curr_pyr, + const CvPoint2D32f* prev_features, + CvPoint2D32f* curr_features, + float* matrices, int count, + CvSize win_size, int level, + char* status, float* track_error, + CvTermCriteria criteria, int flags ); + +/* Estimate rigid transformation between 2 images or 2 point sets */ +CVAPI(int) cvEstimateRigidTransform( const CvArr* A, const CvArr* B, + CvMat* M, int full_affine ); + +/* Estimate optical flow for each pixel using the two-frame G. Farneback algorithm */ +CVAPI(void) cvCalcOpticalFlowFarneback( const CvArr* prev, const CvArr* next, + CvArr* flow, double pyr_scale, int levels, + int winsize, int iterations, int poly_n, + double poly_sigma, int flags ); + +/********************************* motion templates *************************************/ + +/****************************************************************************************\ +* All the motion template functions work only with single channel images. 
* +* Silhouette image must have depth IPL_DEPTH_8U or IPL_DEPTH_8S * +* Motion history image must have depth IPL_DEPTH_32F, * +* Gradient mask - IPL_DEPTH_8U or IPL_DEPTH_8S, * +* Motion orientation image - IPL_DEPTH_32F * +* Segmentation mask - IPL_DEPTH_32F * +* All the angles are in degrees, all the times are in milliseconds * +\****************************************************************************************/ + +/* Updates motion history image given motion silhouette */ +CVAPI(void) cvUpdateMotionHistory( const CvArr* silhouette, CvArr* mhi, + double timestamp, double duration ); + +/* Calculates gradient of the motion history image and fills + a mask indicating where the gradient is valid */ +CVAPI(void) cvCalcMotionGradient( const CvArr* mhi, CvArr* mask, CvArr* orientation, + double delta1, double delta2, + int aperture_size CV_DEFAULT(3)); + +/* Calculates average motion direction within a selected motion region + (region can be selected by setting ROIs and/or by composing a valid gradient mask + with the region mask) */ +CVAPI(double) cvCalcGlobalOrientation( const CvArr* orientation, const CvArr* mask, + const CvArr* mhi, double timestamp, + double duration ); + +/* Splits a motion history image into a few parts corresponding to separate independent motions + (e.g. left hand, right hand) */ +CVAPI(CvSeq*) cvSegmentMotion( const CvArr* mhi, CvArr* seg_mask, + CvMemStorage* storage, + double timestamp, double seg_thresh ); + +/****************************************************************************************\ +* Tracking * +\****************************************************************************************/ + +/* Implements CAMSHIFT algorithm - determines object position, size and orientation + from the object histogram back project (extension of meanshift) */ +CVAPI(int) cvCamShift( const CvArr* prob_image, CvRect window, + CvTermCriteria criteria, CvConnectedComp* comp, + CvBox2D* box CV_DEFAULT(NULL) ); + +/* Implements MeanShift algorithm - determines object position + from the object histogram back project */ +CVAPI(int) cvMeanShift( const CvArr* prob_image, CvRect window, + CvTermCriteria criteria, CvConnectedComp* comp ); + +/* +standard Kalman filter (in G. Welch' and G. 
Bishop's notation): + + x(k)=A*x(k-1)+B*u(k)+w(k) p(w)~N(0,Q) + z(k)=H*x(k)+v(k), p(v)~N(0,R) +*/ +typedef struct CvKalman +{ + int MP; /* number of measurement vector dimensions */ + int DP; /* number of state vector dimensions */ + int CP; /* number of control vector dimensions */ + + /* backward compatibility fields */ +#if 1 + float* PosterState; /* =state_pre->data.fl */ + float* PriorState; /* =state_post->data.fl */ + float* DynamMatr; /* =transition_matrix->data.fl */ + float* MeasurementMatr; /* =measurement_matrix->data.fl */ + float* MNCovariance; /* =measurement_noise_cov->data.fl */ + float* PNCovariance; /* =process_noise_cov->data.fl */ + float* KalmGainMatr; /* =gain->data.fl */ + float* PriorErrorCovariance;/* =error_cov_pre->data.fl */ + float* PosterErrorCovariance;/* =error_cov_post->data.fl */ + float* Temp1; /* temp1->data.fl */ + float* Temp2; /* temp2->data.fl */ +#endif + + CvMat* state_pre; /* predicted state (x'(k)): + x(k)=A*x(k-1)+B*u(k) */ + CvMat* state_post; /* corrected state (x(k)): + x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) */ + CvMat* transition_matrix; /* state transition matrix (A) */ + CvMat* control_matrix; /* control matrix (B) + (it is not used if there is no control)*/ + CvMat* measurement_matrix; /* measurement matrix (H) */ + CvMat* process_noise_cov; /* process noise covariance matrix (Q) */ + CvMat* measurement_noise_cov; /* measurement noise covariance matrix (R) */ + CvMat* error_cov_pre; /* priori error estimate covariance matrix (P'(k)): + P'(k)=A*P(k-1)*At + Q)*/ + CvMat* gain; /* Kalman gain matrix (K(k)): + K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)*/ + CvMat* error_cov_post; /* posteriori error estimate covariance matrix (P(k)): + P(k)=(I-K(k)*H)*P'(k) */ + CvMat* temp1; /* temporary matrices */ + CvMat* temp2; + CvMat* temp3; + CvMat* temp4; + CvMat* temp5; +} CvKalman; + +/* Creates Kalman filter and sets A, B, Q, R and state to some initial values */ +CVAPI(CvKalman*) cvCreateKalman( int dynam_params, int measure_params, + int control_params CV_DEFAULT(0)); + +/* Releases Kalman filter state */ +CVAPI(void) cvReleaseKalman( CvKalman** kalman); + +/* Updates Kalman filter by time (predicts future state of the system) */ +CVAPI(const CvMat*) cvKalmanPredict( CvKalman* kalman, + const CvMat* control CV_DEFAULT(NULL)); + +/* Updates Kalman filter by measurement + (corrects state of the system and internal matrices) */ +CVAPI(const CvMat*) cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement ); + +#define cvKalmanUpdateByTime cvKalmanPredict +#define cvKalmanUpdateByMeasurement cvKalmanCorrect + +#ifdef __cplusplus +} + +namespace cv +{ + +//! updates motion history image using the current silhouette +CV_EXPORTS_W void updateMotionHistory( InputArray silhouette, InputOutputArray mhi, + double timestamp, double duration ); + +//! computes the motion gradient orientation image from the motion history image +CV_EXPORTS_W void calcMotionGradient( InputArray mhi, OutputArray mask, + OutputArray orientation, + double delta1, double delta2, + int apertureSize=3 ); + +//! computes the global orientation of the selected motion history image part +CV_EXPORTS_W double calcGlobalOrientation( InputArray orientation, InputArray mask, + InputArray mhi, double timestamp, + double duration ); + +CV_EXPORTS_W void segmentMotion(InputArray mhi, OutputArray segmask, + CV_OUT vector<Rect>& boundingRects, + double timestamp, double segThresh); + +//! 
updates the object tracking window using CAMSHIFT algorithm +CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window, + TermCriteria criteria ); + +//! updates the object tracking window using meanshift algorithm +CV_EXPORTS_W int meanShift( InputArray probImage, CV_OUT CV_IN_OUT Rect& window, + TermCriteria criteria ); + +/*! + Kalman filter. + + The class implements standard Kalman filter http://en.wikipedia.org/wiki/Kalman_filter. + However, you can modify KalmanFilter::transitionMatrix, KalmanFilter::controlMatrix and + KalmanFilter::measurementMatrix to get the extended Kalman filter functionality. +*/ +class CV_EXPORTS_W KalmanFilter +{ +public: + //! the default constructor + CV_WRAP KalmanFilter(); + //! the full constructor taking the dimensionality of the state, of the measurement and of the control vector + CV_WRAP KalmanFilter(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F); + //! re-initializes Kalman filter. The previous content is destroyed. + void init(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F); + + //! computes predicted state + CV_WRAP const Mat& predict(const Mat& control=Mat()); + //! updates the predicted state from the measurement + CV_WRAP const Mat& correct(const Mat& measurement); + + Mat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k) + Mat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) + Mat transitionMatrix; //!< state transition matrix (A) + Mat controlMatrix; //!< control matrix (B) (not used if there is no control) + Mat measurementMatrix; //!< measurement matrix (H) + Mat processNoiseCov; //!< process noise covariance matrix (Q) + Mat measurementNoiseCov;//!< measurement noise covariance matrix (R) + Mat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)*/ + Mat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R) + Mat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k) + + // temporary matrices + Mat temp1; + Mat temp2; + Mat temp3; + Mat temp4; + Mat temp5; +}; + +enum +{ + OPTFLOW_USE_INITIAL_FLOW = CV_LKFLOW_INITIAL_GUESSES, + OPTFLOW_LK_GET_MIN_EIGENVALS = CV_LKFLOW_GET_MIN_EIGENVALS, + OPTFLOW_FARNEBACK_GAUSSIAN = 256 +}; + +//! constructs a pyramid which can be used as input for calcOpticalFlowPyrLK +CV_EXPORTS_W int buildOpticalFlowPyramid(InputArray img, OutputArrayOfArrays pyramid, + Size winSize, int maxLevel, bool withDerivatives = true, + int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, + bool tryReuseInputImage = true); + +//! computes sparse optical flow using multi-scale Lucas-Kanade algorithm +CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg, + InputArray prevPts, CV_OUT InputOutputArray nextPts, + OutputArray status, OutputArray err, + Size winSize=Size(21,21), int maxLevel=3, + TermCriteria criteria=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01), + int flags=0, double minEigThreshold=1e-4); + +//! computes dense optical flow using Farneback algorithm +CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, + CV_OUT InputOutputArray flow, double pyr_scale, int levels, int winsize, + int iterations, int poly_n, double poly_sigma, int flags ); + +//! estimates the best-fit Euqcidean, similarity, affine or perspective transformation +// that maps one 2D point set to another or one image to another. 
+CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, + bool fullAffine); + +//! computes dense optical flow using Simple Flow algorithm +CV_EXPORTS_W void calcOpticalFlowSF(Mat& from, + Mat& to, + Mat& flow, + int layers, + int averaging_block_size, + int max_flow); + +CV_EXPORTS_W void calcOpticalFlowSF(Mat& from, + Mat& to, + Mat& flow, + int layers, + int averaging_block_size, + int max_flow, + double sigma_dist, + double sigma_color, + int postprocess_window, + double sigma_dist_fix, + double sigma_color_fix, + double occ_thr, + int upscale_averaging_radius, + double upscale_sigma_dist, + double upscale_sigma_color, + double speed_up_thr); + +class CV_EXPORTS DenseOpticalFlow : public Algorithm +{ +public: + virtual void calc(InputArray I0, InputArray I1, InputOutputArray flow) = 0; + virtual void collectGarbage() = 0; +}; + +// Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method +// +// see reference: +// [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow". +// [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation". +CV_EXPORTS Ptr<DenseOpticalFlow> createOptFlow_DualTVL1(); + +} + +#endif + +#endif diff --git a/thirdparty/raspberrypi/includes/opencv2/video/video.hpp b/thirdparty/raspberrypi/includes/opencv2/video/video.hpp new file mode 100644 index 0000000..1dd96f5 --- /dev/null +++ b/thirdparty/raspberrypi/includes/opencv2/video/video.hpp @@ -0,0 +1,58 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_VIDEO_HPP__ +#define __OPENCV_VIDEO_HPP__ + +#include "opencv2/video/tracking.hpp" +#include "opencv2/video/background_segm.hpp" + +#ifdef __cplusplus +namespace cv +{ + +CV_EXPORTS bool initModule_video(void); + +} +#endif + +#endif //__OPENCV_VIDEO_HPP__
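
The headers in this commit only declare interfaces. The short sketches below are hedged usage examples, not part of the commit or of OpenCV itself; they assume an OpenCV 2.4 build where the core, imgproc and highgui modules are available alongside this video module, and all helper names, parameter values and file names are illustrative. First, the BackgroundSubtractorMOG2 interface from background_segm.hpp:

#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"

int main()
{
    cv::VideoCapture cap(0);                    // any camera or video file
    if (!cap.isOpened()) return 1;

    cv::BackgroundSubtractorMOG2 mog2(500,      // history length in frames
                                      16.0f,    // varThreshold (Tb = 4*4 in the paper)
                                      true);    // enable shadow detection
    cv::Mat frame, fgmask, background;
    while (cap.read(frame))
    {
        mog2(frame, fgmask, -1);                // -1: let the model pick the learning rate
        mog2.getBackgroundImage(background);    // mean of the background Gaussians
        cv::imshow("foreground mask", fgmask);
        if (cv::waitKey(30) == 27) break;       // Esc quits
    }
    return 0;
}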
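
A corresponding sketch for BackgroundSubtractorGMG; per the header comment, initialize() is called with the frame size and the pixel value range before the first frame is processed, and the public fields tune the histogram model:

#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"

void runGMG(cv::VideoCapture& cap)
{
    cv::BackgroundSubtractorGMG gmg;
    gmg.numInitializationFrames = 120;          // frames used to seed the histograms

    cv::Mat frame, fgmask;
    if (!cap.read(frame)) return;
    gmg.initialize(frame.size(), 0.0, 255.0);   // value range of 8-bit input frames

    do
        gmg(frame, fgmask);                     // fgmask: 255 = foreground, 0 = background
    while (cap.read(frame));
}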
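
A sketch of the motion-template pipeline wrapped by updateMotionHistory(), calcMotionGradient() and calcGlobalOrientation() in tracking.hpp; the silhouette here is a plain frame difference, and the duration and gradient deltas are illustrative (the only requirement is that timestamp and duration use consistent units):

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"

void motionDirection(const cv::Mat& prevGray, const cv::Mat& currGray, cv::Mat& mhi)
{
    const double MHI_DURATION = 1.0;                     // seconds of history to keep
    double t = (double)cv::getTickCount() / cv::getTickFrequency();

    cv::Mat silhouette;
    cv::absdiff(currGray, prevGray, silhouette);         // crude motion silhouette
    cv::threshold(silhouette, silhouette, 30, 255, cv::THRESH_BINARY);

    if (mhi.empty())
        mhi = cv::Mat::zeros(currGray.size(), CV_32FC1); // motion history image
    cv::updateMotionHistory(silhouette, mhi, t, MHI_DURATION);

    cv::Mat mask, orientation;
    cv::calcMotionGradient(mhi, mask, orientation, 0.5, 0.05, 3);
    double angle = cv::calcGlobalOrientation(orientation, mask, mhi, t, MHI_DURATION);
    (void)angle;                                         // dominant motion direction, in degrees
}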
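
A sketch of the CamShift()/meanShift() wrappers: both consume a probability image (typically a hue back-projection built with imgproc's calcBackProject) and update the search window in place; the histogram and initial window are assumed to come from elsewhere:

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"

cv::RotatedRect trackOnce(const cv::Mat& hue, const cv::Mat& hist, cv::Rect& window)
{
    cv::Mat backproj;
    const float range[] = {0, 180};
    const float* ranges[] = {range};
    int channels[] = {0};
    cv::calcBackProject(&hue, 1, channels, hist, backproj, ranges);

    // CamShift refines `window` in place and returns the rotated bounding box
    return cv::CamShift(backproj, window,
        cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 10, 1.0));
}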
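
A sketch of the cv::KalmanFilter class declared in tracking.hpp: a constant-velocity model tracking a 2-D point (state = [x, y, vx, vy], measurement = [x, y]); the noise magnitudes are illustrative:

#include "opencv2/video/tracking.hpp"

void kalmanDemo(float measuredX, float measuredY)
{
    cv::KalmanFilter kf(4, 2, 0, CV_32F);
    kf.transitionMatrix = (cv::Mat_<float>(4, 4) <<
        1, 0, 1, 0,
        0, 1, 0, 1,
        0, 0, 1, 0,
        0, 0, 0, 1);                                    // x += vx, y += vy per step
    cv::setIdentity(kf.measurementMatrix);              // we observe x and y directly
    cv::setIdentity(kf.processNoiseCov,     cv::Scalar::all(1e-4));
    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-1));
    cv::setIdentity(kf.errorCovPost,        cv::Scalar::all(1));

    cv::Mat prediction = kf.predict();                  // time update: x'(k) = A*x(k-1)
    cv::Mat measurement = (cv::Mat_<float>(2, 1) << measuredX, measuredY);
    cv::Mat corrected = kf.correct(measurement);        // measurement update
    (void)prediction; (void)corrected;
}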
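
A sketch of sparse pyramidal Lucas-Kanade via calcOpticalFlowPyrLK(): Shi-Tomasi corners are detected in one grayscale frame and tracked into the next; buildOpticalFlowPyramid() can optionally pre-compute the pyramid when a frame is reused:

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <vector>

void trackSparse(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    std::vector<cv::Point2f> prevPts, nextPts;
    cv::goodFeaturesToTrack(prevGray, prevPts, 200, 0.01, 10);   // corners worth tracking
    if (prevPts.empty()) return;

    std::vector<uchar> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err,
                             cv::Size(21, 21), 3);               // 21x21 window, 3 pyramid levels

    for (size_t i = 0; i < nextPts.size(); ++i)
        if (status[i])
        {
            // nextPts[i] is the tracked position of prevPts[i] in the next frame
        }
}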
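
A sketch of the dense optical flow entry points: the Farneback function and the DualTVL1 algorithm object returned by createOptFlow_DualTVL1(); the Farneback parameters follow commonly used values and are illustrative:

#include "opencv2/video/tracking.hpp"

void denseFlow(const cv::Mat& prevGray, const cv::Mat& nextGray)
{
    cv::Mat flow;                                       // CV_32FC2: per-pixel (dx, dy)
    cv::calcOpticalFlowFarneback(prevGray, nextGray, flow,
                                 0.5,   // pyr_scale
                                 3,     // levels
                                 15,    // winsize
                                 3,     // iterations
                                 5,     // poly_n
                                 1.2,   // poly_sigma
                                 0);    // flags

    cv::Ptr<cv::DenseOpticalFlow> tvl1 = cv::createOptFlow_DualTVL1();
    tvl1->calc(prevGray, nextGray, flow);               // same output layout as Farneback
    tvl1->collectGarbage();                             // release internal buffers
}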
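
Finally, a sketch of estimateRigidTransform(): it recovers a 2x3 transform between two images (or point sets), which can then be applied with imgproc's warpAffine(), for example to align or stabilise frames:

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"

cv::Mat alignToReference(const cv::Mat& src, const cv::Mat& reference)
{
    // fullAffine=false restricts the fit to rotation + uniform scale + translation
    cv::Mat M = cv::estimateRigidTransform(src, reference, false);

    cv::Mat aligned;
    if (!M.empty())                                     // empty if no transform was found
        cv::warpAffine(src, aligned, M, reference.size());
    return aligned;
}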