- /*
- By downloading, copying, installing or using the software you agree to this
- license. If you do not agree to this license, do not download, install,
- copy or use the software.
- License Agreement
- For Open Source Computer Vision Library
- (3-clause BSD License)
- Copyright (C) 2013, OpenCV Foundation, all rights reserved.
- Third party copyrights are property of their respective owners.
- Redistribution and use in source and binary forms, with or without modification,
- are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the names of the copyright holders nor the names of the contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
- This software is provided by the copyright holders and contributors "as is" and
- any express or implied warranties, including, but not limited to, the implied
- warranties of merchantability and fitness for a particular purpose are
- disclaimed. In no event shall copyright holders or contributors be liable for
- any direct, indirect, incidental, special, exemplary, or consequential damages
- (including, but not limited to, procurement of substitute goods or services;
- loss of use, data, or profits; or business interruption) however caused
- and on any theory of liability, whether in contract, strict liability,
- or tort (including negligence or otherwise) arising in any way out of
- the use of this software, even if advised of the possibility of such damage.
- */
- #ifndef __OPENCV_XFEATURES2D_HPP__
- #define __OPENCV_XFEATURES2D_HPP__
- #include "opencv2/features2d.hpp"
- #include "opencv2/xfeatures2d/nonfree.hpp"
- /** @defgroup xfeatures2d Extra 2D Features Framework
- @{
- @defgroup xfeatures2d_experiment Experimental 2D Features Algorithms
- This section describes experimental algorithms for 2D feature detection.
- @defgroup xfeatures2d_nonfree Non-free 2D Features Algorithms
- This section describes two popular algorithms for 2D feature detection, SIFT and SURF, that are
- known to be patented. You need to set the OPENCV_ENABLE_NONFREE option in CMake to use them. Use them at your own risk.
- @defgroup xfeatures2d_match Experimental 2D Features Matching Algorithm
- This section describes the following matching strategies:
- - GMS: Grid-based Motion Statistics, @cite Bian2017gms
- - LOGOS: Local geometric support for high-outlier spatial verification, @cite Lowry2018LOGOSLG
- @}
- */
- namespace cv
- {
- namespace xfeatures2d
- {
- //! @addtogroup xfeatures2d_experiment
- //! @{
- /** @brief Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in @cite AOV12 .
- The algorithm proposes a novel keypoint descriptor inspired by the human visual system and, more
- precisely, the retina, coined Fast Retina Keypoint (FREAK). A cascade of binary strings is
- computed by efficiently comparing image intensities over a retinal sampling pattern. FREAKs are in
- general faster to compute with lower memory load and also more robust than SIFT, SURF or BRISK.
- They are competitive alternatives to existing keypoints in particular for embedded applications.
- @note
- - An example on how to use the FREAK descriptor can be found at
- opencv_source_code/samples/cpp/freak_demo.cpp
- */
- class CV_EXPORTS_W FREAK : public Feature2D
- {
- public:
- static const int NB_SCALES = 64;
- static const int NB_PAIRS = 512;
- static const int NB_ORIENPAIRS = 45;
- /**
- @param orientationNormalized Enable orientation normalization.
- @param scaleNormalized Enable scale normalization.
- @param patternScale Scaling of the description pattern.
- @param nOctaves Number of octaves covered by the detected keypoints.
- @param selectedPairs (Optional) user-defined selected pairs indexes.
- */
- CV_WRAP static Ptr<FREAK> create(bool orientationNormalized = true,
- bool scaleNormalized = true,
- float patternScale = 22.0f,
- int nOctaves = 4,
- const std::vector<int>& selectedPairs = std::vector<int>());
- CV_WRAP virtual void setOrientationNormalized(bool orientationNormalized) = 0;
- CV_WRAP virtual bool getOrientationNormalized() const = 0;
- CV_WRAP virtual void setScaleNormalized(bool scaleNormalized) = 0;
- CV_WRAP virtual bool getScaleNormalized() const = 0;
- CV_WRAP virtual void setPatternScale(double patternScale) = 0;
- CV_WRAP virtual double getPatternScale() const = 0;
- CV_WRAP virtual void setNOctaves(int nOctaves) = 0;
- CV_WRAP virtual int getNOctaves() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
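- /* Example (illustrative sketch, not part of the header): compute FREAK descriptors for
-  * keypoints found by another detector. It assumes <opencv2/xfeatures2d.hpp> is included
-  * and that "image" is a grayscale CV_8U cv::Mat loaded elsewhere.
-  *
-  *     cv::Ptr<cv::FastFeatureDetector> detector = cv::FastFeatureDetector::create();
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     detector->detect(image, keypoints);
-  *     cv::Ptr<cv::xfeatures2d::FREAK> freak = cv::xfeatures2d::FREAK::create();
-  *     cv::Mat descriptors;
-  *     freak->compute(image, keypoints, descriptors); // one 64-byte (512-bit) row per keypoint
-  */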
- /** @brief The class implements the keypoint detector introduced by @cite Agrawal08, synonym of StarDetector.
- */
- class CV_EXPORTS_W StarDetector : public Feature2D
- {
- public:
- //! the full constructor
- CV_WRAP static Ptr<StarDetector> create(int maxSize=45, int responseThreshold=30,
- int lineThresholdProjected=10,
- int lineThresholdBinarized=8,
- int suppressNonmaxSize=5);
- CV_WRAP virtual void setMaxSize(int _maxSize) = 0;
- CV_WRAP virtual int getMaxSize() const = 0;
- CV_WRAP virtual void setResponseThreshold(int _responseThreshold) = 0;
- CV_WRAP virtual int getResponseThreshold() const = 0;
- CV_WRAP virtual void setLineThresholdProjected(int _lineThresholdProjected) = 0;
- CV_WRAP virtual int getLineThresholdProjected() const = 0;
- CV_WRAP virtual void setLineThresholdBinarized(int _lineThresholdBinarized) = 0;
- CV_WRAP virtual int getLineThresholdBinarized() const = 0;
- CV_WRAP virtual void setSuppressNonmaxSize(int _suppressNonmaxSize) = 0;
- CV_WRAP virtual int getSuppressNonmaxSize() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
- /*
- * BRIEF Descriptor
- */
- /** @brief Class for computing BRIEF descriptors described in @cite calon2010 .
- @param bytes length of the descriptor in bytes; valid values are 16, 32 (default) or 64.
- @param use_orientation sample patterns using keypoint orientation, disabled by default.
- */
- class CV_EXPORTS_W BriefDescriptorExtractor : public Feature2D
- {
- public:
- CV_WRAP static Ptr<BriefDescriptorExtractor> create( int bytes = 32, bool use_orientation = false );
- CV_WRAP virtual void setDescriptorSize(int bytes) = 0;
- CV_WRAP virtual int getDescriptorSize() const = 0;
- CV_WRAP virtual void setUseOrientation(bool use_orientation) = 0;
- CV_WRAP virtual bool getUseOrientation() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
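- /* Example (illustrative sketch): BRIEF is descriptor-only, so pair it with a detector such
-  * as StarDetector; "image" is assumed to be a grayscale CV_8U cv::Mat loaded elsewhere.
-  *
-  *     cv::Ptr<cv::xfeatures2d::StarDetector> star = cv::xfeatures2d::StarDetector::create();
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     star->detect(image, keypoints);
-  *     cv::Ptr<cv::xfeatures2d::BriefDescriptorExtractor> brief =
-  *         cv::xfeatures2d::BriefDescriptorExtractor::create(32);
-  *     cv::Mat descriptors;
-  *     brief->compute(image, keypoints, descriptors); // CV_8U rows, 32 bytes each
-  */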
- /** @brief Class implementing the locally uniform comparison image descriptor, described in @cite LUCID
- An image descriptor that can be computed very fast, while being
- about as robust as, for example, SURF or BRIEF.
- @note It requires a color image as input.
- */
- class CV_EXPORTS_W LUCID : public Feature2D
- {
- public:
- /**
- * @param lucid_kernel kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth
- * @param blur_kernel kernel for blurring image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth
- */
- CV_WRAP static Ptr<LUCID> create(const int lucid_kernel = 1, const int blur_kernel = 2);
- CV_WRAP virtual void setLucidKernel(int lucid_kernel) = 0;
- CV_WRAP virtual int getLucidKernel() const = 0;
- CV_WRAP virtual void setBlurKernel(int blur_kernel) = 0;
- CV_WRAP virtual int getBlurKernel() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
- /*
- * LATCH Descriptor
- */
- /** @brief Class for computing the LATCH descriptor.
- If you find this code useful, please add a reference to the following paper in your work:
- Gil Levi and Tal Hassner, "LATCH: Learned Arrangements of Three Patch Codes", arXiv preprint arXiv:1501.03719, 15 Jan. 2015
- LATCH is a binary descriptor based on learned comparisons of triplets of image patches.
- * bytes is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1
- * rotationInvariance - whether or not the descriptor should compensate for orientation changes.
- * half_ssd_size - the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7,
- then the half_ssd_size should be (7-1)/2 = 3.
- * sigma - sigma value for GaussianBlur smoothing of the source image. The source image is used without smoothing if the sigma value is 0.
- Note: the descriptor can be coupled with any keypoint extractor. The only requirement is that if you set rotationInvariance = true, then
- you will have to use an extractor which estimates the patch orientation (in degrees). Examples of such extractors are ORB and SIFT.
- Note: a complete example can be found under /samples/cpp/tutorial_code/xfeatures2D/latch_match.cpp
- */
- class CV_EXPORTS_W LATCH : public Feature2D
- {
- public:
- CV_WRAP static Ptr<LATCH> create(int bytes = 32, bool rotationInvariance = true, int half_ssd_size = 3, double sigma = 2.0);
- CV_WRAP virtual void setBytes(int bytes) = 0;
- CV_WRAP virtual int getBytes() const = 0;
- CV_WRAP virtual void setRotationInvariance(bool rotationInvariance) = 0;
- CV_WRAP virtual bool getRotationInvariance() const = 0;
- CV_WRAP virtual void setHalfSSDsize(int half_ssd_size) = 0;
- CV_WRAP virtual int getHalfSSDsize() const = 0;
- CV_WRAP virtual void setSigma(double sigma) = 0;
- CV_WRAP virtual double getSigma() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
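- /* Example (illustrative sketch): since rotationInvariance is enabled by default, pair LATCH
-  * with a detector that estimates keypoint orientation, e.g. ORB; "image" is an assumed
-  * grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::ORB> orb = cv::ORB::create(10000);
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     orb->detect(image, keypoints);
-  *     cv::Ptr<cv::xfeatures2d::LATCH> latch = cv::xfeatures2d::LATCH::create(32, true, 3, 2.0);
-  *     cv::Mat descriptors;
-  *     latch->compute(image, keypoints, descriptors);
-  */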
- /** @brief Class implementing BEBLID (Boosted Efficient Binary Local Image Descriptor),
- * described in @cite Suarez2020BEBLID .
- BEBLID \cite Suarez2020BEBLID is an efficient binary descriptor learned with boosting.
- It is able to describe keypoints from any detector just by changing the scale_factor parameter.
- In several benchmarks it has proved to largely improve on other binary descriptors like ORB or
- BRISK with the same efficiency. BEBLID describes a keypoint using differences of mean gray values in
- different regions of the image around the keypoint; the descriptor is specifically optimized for
- image matching and patch retrieval, addressing the asymmetries of these problems.
- If you find this code useful, please add a reference to the following paper:
- <BLOCKQUOTE> Iago Suárez, Ghesn Sfeir, José M. Buenaposada, and Luis Baumela.
- BEBLID: Boosted efficient binary local image descriptor.
- Pattern Recognition Letters, 133:366–372, 2020. </BLOCKQUOTE>
- The descriptor was trained using 1 million randomly sampled pairs of patches
- (20% positives and 80% negatives) from the Liberty split of the UBC datasets
- \cite winder2007learning as described in the paper @cite Suarez2020BEBLID.
- You can check in the [AKAZE example](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp)
- how well BEBLID works. Detecting 10000 keypoints with ORB and describing with BEBLID obtains
- 561 inliers (75%) whereas describing with ORB obtains only 493 inliers (63%).
- */
- class CV_EXPORTS_W BEBLID : public Feature2D
- {
- public:
- /**
- * @brief Descriptor number of bits, each bit is a boosting weak-learner.
- * The user can choose between 512 or 256 bits.
- */
- enum BeblidSize
- {
- SIZE_512_BITS = 100, SIZE_256_BITS = 101,
- };
- /** @brief Creates the BEBLID descriptor.
- @param scale_factor Adjust the sampling window around detected keypoints:
- - <b> 1.00f </b> should be the scale for ORB keypoints
- - <b> 6.75f </b> should be the scale for SIFT detected keypoints
- - <b> 6.25f </b> is default and fits for KAZE, SURF detected keypoints
- - <b> 5.00f </b> should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints
- @param n_bits Determine the number of bits in the descriptor. Should be either
- BEBLID::SIZE_512_BITS or BEBLID::SIZE_256_BITS.
- */
- CV_WRAP static Ptr<BEBLID> create(float scale_factor, int n_bits = BEBLID::SIZE_512_BITS);
- CV_WRAP virtual void setScaleFactor(float scale_factor) = 0;
- CV_WRAP virtual float getScaleFactor() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
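- /* Example (illustrative sketch): describe ORB keypoints with BEBLID, using the 1.00f scale
-  * recommended above for ORB; "image" is an assumed grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::ORB> orb = cv::ORB::create(10000);
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     orb->detect(image, keypoints);
-  *     cv::Ptr<cv::xfeatures2d::BEBLID> beblid =
-  *         cv::xfeatures2d::BEBLID::create(1.0f, cv::xfeatures2d::BEBLID::SIZE_512_BITS);
-  *     cv::Mat descriptors;
-  *     beblid->compute(image, keypoints, descriptors);
-  */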
- /** @brief Class implementing TEBLID (Triplet-based Efficient Binary Local Image Descriptor),
- * described in @cite Suarez2021TEBLID.
- TEBLID stands for Triplet-based Efficient Binary Local Image Descriptor, although originally it was called BAD
- \cite Suarez2021TEBLID. It is an improvement over BEBLID \cite Suarez2020BEBLID that uses triplet loss,
- hard negative mining, and anchor swap to improve the image matching results.
- It is able to describe keypoints from any detector just by changing the scale_factor parameter.
- TEBLID is as efficient as ORB, BEBLID or BRISK, but the triplet-based training objective selects more
- discriminative features, which explains the accuracy gain. It is also more compact than BEBLID:
- when running the [AKAZE example](https://github.com/opencv/opencv/blob/4.x/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp)
- with 10000 keypoints detected by ORB, BEBLID obtains 561 inliers (75%) with 512 bits, whereas
- TEBLID obtains 621 (75.2%) with 256 bits; ORB obtains only 493 inliers (63%).
- If you find this code useful, please add a reference to the following paper:
- <BLOCKQUOTE> Iago Suárez, José M. Buenaposada, and Luis Baumela.
- Revisiting Binary Local Image Description for Resource Limited Devices.
- IEEE Robotics and Automation Letters, vol. 6, no. 4, pp. 8317-8324, Oct. 2021. </BLOCKQUOTE>
- The descriptor was trained on the Liberty split of the UBC datasets \cite winder2007learning .
- */
- class CV_EXPORTS_W TEBLID : public Feature2D
- {
- public:
- /**
- * @brief Descriptor number of bits, each bit is a box average difference.
- * The user can choose between 256 or 512 bits.
- */
- enum TeblidSize
- {
- SIZE_256_BITS = 102, SIZE_512_BITS = 103,
- };
- /** @brief Creates the TEBLID descriptor.
- @param scale_factor Adjust the sampling window around detected keypoints:
- - <b> 1.00f </b> should be the scale for ORB keypoints
- - <b> 6.75f </b> should be the scale for SIFT detected keypoints
- - <b> 6.25f </b> is default and fits for KAZE, SURF detected keypoints
- - <b> 5.00f </b> should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints
- @param n_bits Determine the number of bits in the descriptor. Should be either
- TEBLID::SIZE_256_BITS or TEBLID::SIZE_512_BITS.
- */
- CV_WRAP static Ptr<TEBLID> create(float scale_factor, int n_bits = TEBLID::SIZE_256_BITS);
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
- /** @brief Class implementing DAISY descriptor, described in @cite Tola10
- @param radius radius of the descriptor at the initial scale
- @param q_radius amount of radial range divisions
- @param q_theta amount of angular range divisions
- @param q_hist amount of gradient orientation range divisions
- @param norm descriptor normalization type, where
- DAISY::NRM_NONE will not do any normalization (default),
- DAISY::NRM_PARTIAL means that histograms are normalized independently for L2 norm equal to 1.0,
- DAISY::NRM_FULL means that descriptors are normalized for L2 norm equal to 1.0,
- DAISY::NRM_SIFT means that descriptors are normalized for L2 norm equal to 1.0 but no individual value is bigger than 0.154 as in SIFT
- @param H optional 3x3 homography matrix used to warp the grid of DAISY, but sampling keypoints remain unwarped on the image
- @param interpolation switch to disable interpolation for speed improvement at minor quality loss
- @param use_orientation sample patterns using keypoints orientation, disabled by default.
- */
- class CV_EXPORTS_W DAISY : public Feature2D
- {
- public:
- enum NormalizationType
- {
- NRM_NONE = 100, NRM_PARTIAL = 101, NRM_FULL = 102, NRM_SIFT = 103,
- };
- CV_WRAP static Ptr<DAISY> create( float radius = 15, int q_radius = 3, int q_theta = 8,
- int q_hist = 8, DAISY::NormalizationType norm = DAISY::NRM_NONE, InputArray H = noArray(),
- bool interpolation = true, bool use_orientation = false );
- CV_WRAP virtual void setRadius(float radius) = 0;
- CV_WRAP virtual float getRadius() const = 0;
- CV_WRAP virtual void setQRadius(int q_radius) = 0;
- CV_WRAP virtual int getQRadius() const = 0;
- CV_WRAP virtual void setQTheta(int q_theta) = 0;
- CV_WRAP virtual int getQTheta() const = 0;
- CV_WRAP virtual void setQHist(int q_hist) = 0;
- CV_WRAP virtual int getQHist() const = 0;
- CV_WRAP virtual void setNorm(int norm) = 0;
- CV_WRAP virtual int getNorm() const = 0;
- CV_WRAP virtual void setH(InputArray H) = 0;
- CV_WRAP virtual cv::Mat getH() const = 0;
- CV_WRAP virtual void setInterpolation(bool interpolation) = 0;
- CV_WRAP virtual bool getInterpolation() const = 0;
- CV_WRAP virtual void setUseOrientation(bool use_orientation) = 0;
- CV_WRAP virtual bool getUseOrientation() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- /** @overload
- * @param image image to extract descriptors
- * @param keypoints of interest within image
- * @param descriptors resulted descriptors array
- */
- virtual void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) CV_OVERRIDE = 0;
- virtual void compute( InputArrayOfArrays images,
- std::vector<std::vector<KeyPoint> >& keypoints,
- OutputArrayOfArrays descriptors ) CV_OVERRIDE;
- /** @overload
- * @param image image to extract descriptors
- * @param roi region of interest within image
- * @param descriptors resulted descriptors array for roi image pixels
- */
- virtual void compute( InputArray image, Rect roi, OutputArray descriptors ) = 0;
- /**@overload
- * @param image image to extract descriptors
- * @param descriptors resulted descriptors array for all image pixels
- */
- virtual void compute( InputArray image, OutputArray descriptors ) = 0;
- /**
- * @param y position y on image
- * @param x position x on image
- * @param orientation orientation on image (0->360)
- * @param descriptor supplied array for descriptor storage
- */
- virtual void GetDescriptor( double y, double x, int orientation, float* descriptor ) const = 0;
- /**
- * @param y position y on image
- * @param x position x on image
- * @param orientation orientation on image (0->360)
- * @param descriptor supplied array for descriptor storage
- * @param H homography matrix for warped grid
- */
- virtual bool GetDescriptor( double y, double x, int orientation, float* descriptor, double* H ) const = 0;
- /**
- * @param y position y on image
- * @param x position x on image
- * @param orientation orientation on image (0->360)
- * @param descriptor supplied array for descriptor storage
- */
- virtual void GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor ) const = 0;
- /**
- * @param y position y on image
- * @param x position x on image
- * @param orientation orientation on image (0->360)
- * @param descriptor supplied array for descriptor storage
- * @param H homography matrix for warped grid
- */
- virtual bool GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor , double *H ) const = 0;
- };
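- /* Example (illustrative sketch): DAISY can describe sparse keypoints like any Feature2D, or
-  * densely describe a whole region of interest; "image" is an assumed grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::xfeatures2d::DAISY> daisy = cv::xfeatures2d::DAISY::create();
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     cv::FastFeatureDetector::create()->detect(image, keypoints);
-  *     cv::Mat descriptors;
-  *     daisy->compute(image, keypoints, descriptors);            // one float row per keypoint
-  *     cv::Mat dense;
-  *     daisy->compute(image, cv::Rect(0, 0, image.cols, image.rows), dense); // descriptors for all ROI pixels
-  */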
- /** @brief Class implementing the MSD (*Maximal Self-Dissimilarity*) keypoint detector, described in @cite Tombari14.
- The algorithm implements a novel interest point detector stemming from the intuition that image patches
- which are highly dissimilar over a relatively large extent of their surroundings hold the property of
- being repeatable and distinctive. This concept of "contextual self-dissimilarity" reverses the key
- paradigm of recent successful techniques such as the Local Self-Similarity descriptor and the Non-Local
- Means filter, which build upon the presence of similar - rather than dissimilar - patches. Moreover,
- it extends to contextual information the local self-dissimilarity notion embedded in established
- detectors of corner-like interest points, thereby achieving enhanced repeatability, distinctiveness and
- localization accuracy.
- */
- class CV_EXPORTS_W MSDDetector : public Feature2D {
- public:
- CV_WRAP static Ptr<MSDDetector> create(int m_patch_radius = 3, int m_search_area_radius = 5,
- int m_nms_radius = 5, int m_nms_scale_radius = 0, float m_th_saliency = 250.0f, int m_kNN = 4,
- float m_scale_factor = 1.25f, int m_n_scales = -1, bool m_compute_orientation = false);
- CV_WRAP virtual void setPatchRadius(int patch_radius) = 0;
- CV_WRAP virtual int getPatchRadius() const = 0;
- CV_WRAP virtual void setSearchAreaRadius(int search_area_radius) = 0;
- CV_WRAP virtual int getSearchAreaRadius() const = 0;
- CV_WRAP virtual void setNmsRadius(int nms_radius) = 0;
- CV_WRAP virtual int getNmsRadius() const = 0;
- CV_WRAP virtual void setNmsScaleRadius(int nms_scale_radius) = 0;
- CV_WRAP virtual int getNmsScaleRadius() const = 0;
- CV_WRAP virtual void setThSaliency(float th_saliency) = 0;
- CV_WRAP virtual float getThSaliency() const = 0;
- CV_WRAP virtual void setKNN(int kNN) = 0;
- CV_WRAP virtual int getKNN() const = 0;
- CV_WRAP virtual void setScaleFactor(float scale_factor) = 0;
- CV_WRAP virtual float getScaleFactor() const = 0;
- CV_WRAP virtual void setNScales(int n_scales) = 0;
- CV_WRAP virtual int getNScales() const = 0;
- CV_WRAP virtual void setComputeOrientation(bool compute_orientation) = 0;
- CV_WRAP virtual bool getComputeOrientation() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
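- /* Example (illustrative sketch): MSD is a detector only, so combine it with any descriptor,
-  * here DAISY; "image" is an assumed grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::xfeatures2d::MSDDetector> msd = cv::xfeatures2d::MSDDetector::create();
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     msd->detect(image, keypoints);
-  *     cv::Mat descriptors;
-  *     cv::xfeatures2d::DAISY::create()->compute(image, keypoints, descriptors);
-  */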
- /** @brief Class implementing VGG (Oxford Visual Geometry Group) descriptor trained end to end
- using "Descriptor Learning Using Convex Optimisation" (DLCO) aparatus described in @cite Simonyan14.
- @param desc type of descriptor to use, VGG::VGG_120 is default (120 dimensions float)
- Available types are VGG::VGG_120, VGG::VGG_80, VGG::VGG_64, VGG::VGG_48
- @param isigma gaussian kernel value for image blur (default is 1.4f)
- @param img_normalize use image sample intensity normalization (enabled by default)
- @param use_scale_orientation sample patterns using keypoint orientation, enabled by default
- @param scale_factor adjust the sampling window of detected keypoints to 64.0f (VGG sampling window)
- 6.25f is default and fits for KAZE, SURF detected keypoints window ratio
- 6.75f should be the scale for SIFT detected keypoints window ratio
- 5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints window ratio
- 0.75f should be the scale for ORB keypoints ratio
- @param dsc_normalize clamp descriptors to 255 and convert to uchar CV_8UC1 (disabled by default)
- */
- class CV_EXPORTS_W VGG : public Feature2D
- {
- public:
- CV_WRAP enum
- {
- VGG_120 = 100, VGG_80 = 101, VGG_64 = 102, VGG_48 = 103,
- };
- CV_WRAP static Ptr<VGG> create( int desc = VGG::VGG_120, float isigma = 1.4f,
- bool img_normalize = true, bool use_scale_orientation = true,
- float scale_factor = 6.25f, bool dsc_normalize = false );
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- CV_WRAP virtual void setSigma(const float isigma) = 0;
- CV_WRAP virtual float getSigma() const = 0;
- CV_WRAP virtual void setUseNormalizeImage(const bool img_normalize) = 0;
- CV_WRAP virtual bool getUseNormalizeImage() const = 0;
- CV_WRAP virtual void setUseScaleOrientation(const bool use_scale_orientation) = 0;
- CV_WRAP virtual bool getUseScaleOrientation() const = 0;
- CV_WRAP virtual void setScaleFactor(const float scale_factor) = 0;
- CV_WRAP virtual float getScaleFactor() const = 0;
- CV_WRAP virtual void setUseNormalizeDescriptor(const bool dsc_normalize) = 0;
- CV_WRAP virtual bool getUseNormalizeDescriptor() const = 0;
- };
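- /* Example (illustrative sketch): describe KAZE keypoints with VGG; the default
-  * scale_factor = 6.25f matches KAZE/SURF keypoints as noted above. "image" is an assumed
-  * grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::KAZE> kaze = cv::KAZE::create();
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     kaze->detect(image, keypoints);
-  *     cv::Ptr<cv::xfeatures2d::VGG> vgg = cv::xfeatures2d::VGG::create();
-  *     cv::Mat descriptors;
-  *     vgg->compute(image, keypoints, descriptors); // 120-dimensional float rows by default
-  */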
- /** @brief Class implementing BoostDesc (Learning Image Descriptors with Boosting), described in
- @cite Trzcinski13a and @cite Trzcinski13b.
- @param desc type of descriptor to use, BoostDesc::BINBOOST_256 is default (256 bit long dimension)
- Available types are: BoostDesc::BGM, BoostDesc::BGM_HARD, BoostDesc::BGM_BILINEAR, BoostDesc::LBGM,
- BoostDesc::BINBOOST_64, BoostDesc::BINBOOST_128, BoostDesc::BINBOOST_256
- @param use_scale_orientation sample patterns using keypoint orientation, enabled by default
- @param scale_factor adjust the sampling window of detected keypoints
- 6.25f is default and fits for KAZE, SURF detected keypoints window ratio
- 6.75f should be the scale for SIFT detected keypoints window ratio
- 5.00f should be the scale for AKAZE, MSD, AGAST, FAST, BRISK keypoints window ratio
- 0.75f should be the scale for ORB keypoints ratio
- 1.50f was the default in original implementation
- @note BGM is the base descriptor where each binary dimension is computed as the output of a single weak learner.
- BGM_HARD and BGM_BILINEAR refer to the same BGM but use different types of gradient binning. BGM_HARD
- uses the ASSIGN_HARD binning type: the gradient is assigned to the nearest orientation bin. BGM_BILINEAR uses the
- ASSIGN_BILINEAR binning type: the gradient is assigned to the two neighbouring bins. BGM and all other modes use the
- ASSIGN_SOFT binning type: the gradient is assigned to the 8 nearest bins according to the cosine value between the gradient
- angle and the bin center. LBGM (alias FP-Boost) is the floating point extension where each dimension is computed
- as a linear combination of the weak learner responses. BINBOOST and subvariants are the binary extensions of LBGM
- where each bit is computed as a thresholded linear combination of a set of weak learners.
- BoostDesc header files (boostdesc_*.i) were exported from the original binaries with the export-boostdesc.py script from the
- samples subfolder.
- */
- class CV_EXPORTS_W BoostDesc : public Feature2D
- {
- public:
- CV_WRAP enum
- {
- BGM = 100, BGM_HARD = 101, BGM_BILINEAR = 102, LBGM = 200,
- BINBOOST_64 = 300, BINBOOST_128 = 301, BINBOOST_256 = 302
- };
- CV_WRAP static Ptr<BoostDesc> create( int desc = BoostDesc::BINBOOST_256,
- bool use_scale_orientation = true, float scale_factor = 6.25f );
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- CV_WRAP virtual void setUseScaleOrientation(const bool use_scale_orientation) = 0;
- CV_WRAP virtual bool getUseScaleOrientation() const = 0;
- CV_WRAP virtual void setScaleFactor(const float scale_factor) = 0;
- CV_WRAP virtual float getScaleFactor() const = 0;
- };
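- /* Example (illustrative sketch): describe AKAZE keypoints with BINBOOST_256, using the 5.00f
-  * scale suggested above for AKAZE keypoints; "image" is an assumed grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     akaze->detect(image, keypoints);
-  *     cv::Ptr<cv::xfeatures2d::BoostDesc> boost =
-  *         cv::xfeatures2d::BoostDesc::create(cv::xfeatures2d::BoostDesc::BINBOOST_256, true, 5.0f);
-  *     cv::Mat descriptors;
-  *     boost->compute(image, keypoints, descriptors);
-  */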
- /*
- * Position-Color-Texture signatures
- */
- /**
- * @brief Class implementing PCT (position-color-texture) signature extraction
- * as described in @cite KrulisLS16.
- * The algorithm is divided into a feature sampler and a clusterizer.
- * The feature sampler produces samples at a given set of coordinates.
- * The clusterizer then produces clusters of these samples using the k-means algorithm.
- * The resulting set of clusters is the signature of the input image.
- *
- * A signature is an array of SIGNATURE_DIMENSION-dimensional points.
- * Used dimensions are:
- * weight, x, y position; Lab color, contrast, entropy.
- * @cite KrulisLS16
- * @cite BeecksUS10
- */
- class CV_EXPORTS_W PCTSignatures : public Algorithm
- {
- public:
- /**
- * @brief Lp distance function selector.
- */
- enum DistanceFunction
- {
- L0_25, L0_5, L1, L2, L2SQUARED, L5, L_INFINITY
- };
- /**
- * @brief Point distributions supported by random point generator.
- */
- enum PointDistribution
- {
- UNIFORM, //!< Generate numbers uniformly.
- REGULAR, //!< Generate points in a regular grid.
- NORMAL //!< Generate points with normal (gaussian) distribution.
- };
- /**
- * @brief Similarity function selector.
- * @see
- * Christian Beecks, Merih Seran Uysal, Thomas Seidl.
- * Signature quadratic form distance.
- * In Proceedings of the ACM International Conference on Image and Video Retrieval, pages 438-445.
- * ACM, 2010.
- * @cite BeecksUS10
- * @note For selected distance function: \f[ d(c_i, c_j) \f] and parameter: \f[ \alpha \f]
- */
- enum SimilarityFunction
- {
- MINUS, //!< \f[ -d(c_i, c_j) \f]
- GAUSSIAN, //!< \f[ e^{ -\alpha * d^2(c_i, c_j)} \f]
- HEURISTIC //!< \f[ \frac{1}{\alpha + d(c_i, c_j)} \f]
- };
- /**
- * @brief Creates PCTSignatures algorithm using sample and seed count.
- * It generates its own sets of sampling points and clusterization seed indexes.
- * @param initSampleCount Number of points used for image sampling.
- * @param initSeedCount Number of initial clusterization seeds.
- * Must be lower than or equal to initSampleCount.
- * @param pointDistribution Distribution of generated points. Default: UNIFORM.
- * Available: UNIFORM, REGULAR, NORMAL.
- * @return Created algorithm.
- */
- CV_WRAP static Ptr<PCTSignatures> create(
- const int initSampleCount = 2000,
- const int initSeedCount = 400,
- const int pointDistribution = 0);
- /**
- * @brief Creates PCTSignatures algorithm using pre-generated sampling points
- * and number of clusterization seeds. It uses the provided
- * sampling points and generates its own clusterization seed indexes.
- * @param initSamplingPoints Sampling points used in image sampling.
- * @param initSeedCount Number of initial clusterization seeds.
- * Must be lower than or equal to initSamplingPoints.size().
- * @return Created algorithm.
- */
- CV_WRAP static Ptr<PCTSignatures> create(
- const std::vector<Point2f>& initSamplingPoints,
- const int initSeedCount);
- /**
- * @brief Creates PCTSignatures algorithm using pre-generated sampling points
- * and clusterization seeds indexes.
- * @param initSamplingPoints Sampling points used in image sampling.
- * @param initClusterSeedIndexes Indexes of initial clusterization seeds.
- * Its size must be lower than or equal to initSamplingPoints.size().
- * @return Created algorithm.
- */
- CV_WRAP static Ptr<PCTSignatures> create(
- const std::vector<Point2f>& initSamplingPoints,
- const std::vector<int>& initClusterSeedIndexes);
- /**
- * @brief Computes signature of given image.
- * @param image Input image of CV_8U type.
- * @param signature Output computed signature.
- */
- CV_WRAP virtual void computeSignature(
- InputArray image,
- OutputArray signature) const = 0;
- /**
- * @brief Computes signatures for multiple images in parallel.
- * @param images Vector of input images of CV_8U type.
- * @param signatures Vector of computed signatures.
- */
- CV_WRAP virtual void computeSignatures(
- const std::vector<Mat>& images,
- std::vector<Mat>& signatures) const = 0;
- /**
- * @brief Draws signature in the source image and outputs the result.
- * Signatures are visualized as a circle
- * with radius based on signature weight
- * and color based on signature color.
- * Contrast and entropy are not visualized.
- * @param source Source image.
- * @param signature Image signature.
- * @param result Output result.
- * @param radiusToShorterSideRatio Determines maximal radius of signature in the output image.
- * @param borderThickness Border thickness of the visualized signature.
- */
- CV_WRAP static void drawSignature(
- InputArray source,
- InputArray signature,
- OutputArray result,
- float radiusToShorterSideRatio = 1.0 / 8,
- int borderThickness = 1);
- /**
- * @brief Generates initial sampling points according to selected point distribution.
- * @param initPoints Output vector where the generated points will be saved.
- * @param count Number of points to generate.
- * @param pointDistribution Point distribution selector.
- * Available: UNIFORM, REGULAR, NORMAL.
- * @note Generated coordinates are in range [0..1)
- */
- CV_WRAP static void generateInitPoints(
- std::vector<Point2f>& initPoints,
- const int count,
- int pointDistribution);
- /**** sampler ****/
- /**
- * @brief Number of initial samples taken from the image.
- */
- CV_WRAP virtual int getSampleCount() const = 0;
- /**
- * @brief Color resolution of the greyscale bitmap represented in allocated bits
- * (i.e., value 4 means that 16 shades of grey are used).
- * The greyscale bitmap is used for computing contrast and entropy values.
- */
- CV_WRAP virtual int getGrayscaleBits() const = 0;
- /**
- * @brief Color resolution of the greyscale bitmap represented in allocated bits
- * (i.e., value 4 means that 16 shades of grey are used).
- * The greyscale bitmap is used for computing contrast and entropy values.
- */
- CV_WRAP virtual void setGrayscaleBits(int grayscaleBits) = 0;
- /**
- * @brief Size of the texture sampling window used to compute contrast and entropy
- * (center of the window is always in the pixel selected by x,y coordinates
- * of the corresponding feature sample).
- */
- CV_WRAP virtual int getWindowRadius() const = 0;
- /**
- * @brief Size of the texture sampling window used to compute contrast and entropy
- * (center of the window is always in the pixel selected by x,y coordinates
- * of the corresponding feature sample).
- */
- CV_WRAP virtual void setWindowRadius(int radius) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual float getWeightX() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual void setWeightX(float weight) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual float getWeightY() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual void setWeightY(float weight) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual float getWeightL() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual void setWeightL(float weight) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual float getWeightA() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual void setWeightA(float weight) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual float getWeightB() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual void setWeightB(float weight) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual float getWeightContrast() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual void setWeightContrast(float weight) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual float getWeightEntropy() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space
- * (x,y = position; L,a,b = color in CIE Lab space; c = contrast, e = entropy)
- */
- CV_WRAP virtual void setWeightEntropy(float weight) = 0;
- /**
- * @brief Initial samples taken from the image.
- * These sampled features become the input for clustering.
- */
- CV_WRAP virtual std::vector<Point2f> getSamplingPoints() const = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space.
- * @param idx ID of the weight
- * @param value Value of the weight
- * @note
- * WEIGHT_IDX = 0;
- * X_IDX = 1;
- * Y_IDX = 2;
- * L_IDX = 3;
- * A_IDX = 4;
- * B_IDX = 5;
- * CONTRAST_IDX = 6;
- * ENTROPY_IDX = 7;
- */
- CV_WRAP virtual void setWeight(int idx, float value) = 0;
- /**
- * @brief Weights (multiplicative constants) that linearly stretch individual axes of the feature space.
- * @param weights Values of all weights.
- * @note
- * WEIGHT_IDX = 0;
- * X_IDX = 1;
- * Y_IDX = 2;
- * L_IDX = 3;
- * A_IDX = 4;
- * B_IDX = 5;
- * CONTRAST_IDX = 6;
- * ENTROPY_IDX = 7;
- */
- CV_WRAP virtual void setWeights(const std::vector<float>& weights) = 0;
- /**
- * @brief Translations of the individual axes of the feature space.
- * @param idx ID of the translation
- * @param value Value of the translation
- * @note
- * WEIGHT_IDX = 0;
- * X_IDX = 1;
- * Y_IDX = 2;
- * L_IDX = 3;
- * A_IDX = 4;
- * B_IDX = 5;
- * CONTRAST_IDX = 6;
- * ENTROPY_IDX = 7;
- */
- CV_WRAP virtual void setTranslation(int idx, float value) = 0;
- /**
- * @brief Translations of the individual axes of the feature space.
- * @param translations Values of all translations.
- * @note
- * WEIGHT_IDX = 0;
- * X_IDX = 1;
- * Y_IDX = 2;
- * L_IDX = 3;
- * A_IDX = 4;
- * B_IDX = 5;
- * CONTRAST_IDX = 6;
- * ENTROPY_IDX = 7;
- */
- CV_WRAP virtual void setTranslations(const std::vector<float>& translations) = 0;
- /**
- * @brief Sets sampling points used to sample the input image.
- * @param samplingPoints Vector of sampling points in range [0..1)
- * @note The number of sampling points must be greater than or equal to the clusterization seed count.
- */
- CV_WRAP virtual void setSamplingPoints(std::vector<Point2f> samplingPoints) = 0;
- /**** clusterizer ****/
- /**
- * @brief Initial seeds (initial number of clusters) for the k-means algorithm.
- */
- CV_WRAP virtual std::vector<int> getInitSeedIndexes() const = 0;
- /**
- * @brief Initial seed indexes for the k-means algorithm.
- */
- CV_WRAP virtual void setInitSeedIndexes(std::vector<int> initSeedIndexes) = 0;
- /**
- * @brief Number of initial seeds (initial number of clusters) for the k-means algorithm.
- */
- CV_WRAP virtual int getInitSeedCount() const = 0;
- /**
- * @brief Number of iterations of the k-means clustering.
- * We use fixed number of iterations, since the modified clustering is pruning clusters
- * (not iteratively refining k clusters).
- */
- CV_WRAP virtual int getIterationCount() const = 0;
- /**
- * @brief Number of iterations of the k-means clustering.
- * We use fixed number of iterations, since the modified clustering is pruning clusters
- * (not iteratively refining k clusters).
- */
- CV_WRAP virtual void setIterationCount(int iterationCount) = 0;
- /**
- * @brief Maximal number of generated clusters. If the number is exceeded,
- * the clusters are sorted by their weights and the smallest clusters are cropped.
- */
- CV_WRAP virtual int getMaxClustersCount() const = 0;
- /**
- * @brief Maximal number of generated clusters. If the number is exceeded,
- * the clusters are sorted by their weights and the smallest clusters are cropped.
- */
- CV_WRAP virtual void setMaxClustersCount(int maxClustersCount) = 0;
- /**
- * @brief This parameter multiplied by the index of iteration gives the lower limit for cluster size.
- * Clusters containing fewer points than specified by the limit have their centroid dismissed
- * and points are reassigned.
- */
- CV_WRAP virtual int getClusterMinSize() const = 0;
- /**
- * @brief This parameter multiplied by the index of iteration gives the lower limit for cluster size.
- * Clusters containing fewer points than specified by the limit have their centroid dismissed
- * and points are reassigned.
- */
- CV_WRAP virtual void setClusterMinSize(int clusterMinSize) = 0;
- /**
- * @brief Threshold euclidean distance between two centroids.
- * If two cluster centers are closer than this distance,
- * one of the centroids is dismissed and points are reassigned.
- */
- CV_WRAP virtual float getJoiningDistance() const = 0;
- /**
- * @brief Threshold euclidean distance between two centroids.
- * If two cluster centers are closer than this distance,
- * one of the centroids is dismissed and points are reassigned.
- */
- CV_WRAP virtual void setJoiningDistance(float joiningDistance) = 0;
- /**
- * @brief Remove centroids in k-means whose weight is less than or equal to the given threshold.
- */
- CV_WRAP virtual float getDropThreshold() const = 0;
- /**
- * @brief Remove centroids in k-means whose weight is less than or equal to the given threshold.
- */
- CV_WRAP virtual void setDropThreshold(float dropThreshold) = 0;
- /**
- * @brief Distance function selector used for measuring distance between two points in k-means.
- */
- CV_WRAP virtual int getDistanceFunction() const = 0;
- /**
- * @brief Distance function selector used for measuring distance between two points in k-means.
- * Available: L0_25, L0_5, L1, L2, L2SQUARED, L5, L_INFINITY.
- */
- CV_WRAP virtual void setDistanceFunction(int distanceFunction) = 0;
- };
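- /* Example (illustrative sketch): compute a PCT signature of a color image and visualize it;
-  * "image" is an assumed CV_8U BGR cv::Mat.
-  *
-  *     cv::Ptr<cv::xfeatures2d::PCTSignatures> pct =
-  *         cv::xfeatures2d::PCTSignatures::create(2000, 400);
-  *     cv::Mat signature;
-  *     pct->computeSignature(image, signature);
-  *     cv::Mat visualization;
-  *     cv::xfeatures2d::PCTSignatures::drawSignature(image, signature, visualization);
-  */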
- /**
- * @brief Class implementing Signature Quadratic Form Distance (SQFD).
- * @see Christian Beecks, Merih Seran Uysal, Thomas Seidl.
- * Signature quadratic form distance.
- * In Proceedings of the ACM International Conference on Image and Video Retrieval, pages 438-445.
- * ACM, 2010.
- * @cite BeecksUS10
- */
- class CV_EXPORTS_W PCTSignaturesSQFD : public Algorithm
- {
- public:
- /**
- * @brief Creates the algorithm instance using selected distance function,
- * similarity function and similarity function parameter.
- * @param distanceFunction Distance function selector. Default: L2
- * Available: L0_25, L0_5, L1, L2, L2SQUARED, L5, L_INFINITY
- * @param similarityFunction Similarity function selector. Default: HEURISTIC
- * Available: MINUS, GAUSSIAN, HEURISTIC
- * @param similarityParameter Parameter of the similarity function.
- */
- CV_WRAP static Ptr<PCTSignaturesSQFD> create(
- const int distanceFunction = 3,
- const int similarityFunction = 2,
- const float similarityParameter = 1.0f);
- /**
- * @brief Computes Signature Quadratic Form Distance of two signatures.
- * @param _signature0 The first signature.
- * @param _signature1 The second signature.
- */
- CV_WRAP virtual float computeQuadraticFormDistance(
- InputArray _signature0,
- InputArray _signature1) const = 0;
- /**
- * @brief Computes Signature Quadratic Form Distance between the reference signature
- * and each of the other image signatures.
- * @param sourceSignature The signature to measure distance of other signatures from.
- * @param imageSignatures Vector of signatures to measure distance from the source signature.
- * @param distances Output vector of measured distances.
- */
- CV_WRAP virtual void computeQuadraticFormDistances(
- const Mat& sourceSignature,
- const std::vector<Mat>& imageSignatures,
- std::vector<float>& distances) const = 0;
- };
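- /* Example (illustrative sketch): measure the SQFD between the signatures of two images;
-  * "image0" and "image1" are assumed CV_8U BGR cv::Mat inputs.
-  *
-  *     cv::Ptr<cv::xfeatures2d::PCTSignatures> pct = cv::xfeatures2d::PCTSignatures::create();
-  *     cv::Mat sig0, sig1;
-  *     pct->computeSignature(image0, sig0);
-  *     pct->computeSignature(image1, sig1);
-  *     cv::Ptr<cv::xfeatures2d::PCTSignaturesSQFD> sqfd =
-  *         cv::xfeatures2d::PCTSignaturesSQFD::create(); // L2 distance, heuristic similarity
-  *     float distance = sqfd->computeQuadraticFormDistance(sig0, sig1);
-  */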
- /**
- * @brief Elliptic region around an interest point.
- */
- class CV_EXPORTS Elliptic_KeyPoint : public KeyPoint
- {
- public:
- Size_<float> axes; //!< the lengths of the major and minor ellipse axes
- float si; //!< the integration scale at which the parameters were estimated
- Matx23f transf; //!< the transformation between image space and local patch space
- Elliptic_KeyPoint();
- Elliptic_KeyPoint(Point2f pt, float angle, Size axes, float size, float si);
- virtual ~Elliptic_KeyPoint();
- };
- /**
- * @brief Class implementing the Harris-Laplace feature detector as described in @cite Mikolajczyk2004.
- */
- class CV_EXPORTS_W HarrisLaplaceFeatureDetector : public Feature2D
- {
- public:
- /**
- * @brief Creates a new implementation instance.
- *
- * @param numOctaves the number of octaves in the scale-space pyramid
- * @param corn_thresh the threshold for the Harris cornerness measure
- * @param DOG_thresh the threshold for the Difference-of-Gaussians scale selection
- * @param maxCorners the maximum number of corners to consider
- * @param num_layers the number of intermediate scales per octave
- */
- CV_WRAP static Ptr<HarrisLaplaceFeatureDetector> create(
- int numOctaves=6,
- float corn_thresh=0.01f,
- float DOG_thresh=0.01f,
- int maxCorners=5000,
- int num_layers=4);
- CV_WRAP virtual void setNumOctaves(int numOctaves_) = 0;
- CV_WRAP virtual int getNumOctaves() const = 0;
- CV_WRAP virtual void setCornThresh(float corn_thresh_) = 0;
- CV_WRAP virtual float getCornThresh() const = 0;
- CV_WRAP virtual void setDOGThresh(float DOG_thresh_) = 0;
- CV_WRAP virtual float getDOGThresh() const = 0;
- CV_WRAP virtual void setMaxCorners(int maxCorners_) = 0;
- CV_WRAP virtual int getMaxCorners() const = 0;
- CV_WRAP virtual void setNumLayers(int num_layers_) = 0;
- CV_WRAP virtual int getNumLayers() const = 0;
- CV_WRAP String getDefaultName() const CV_OVERRIDE;
- };
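- /* Example (illustrative sketch): detect scale-adapted Harris corners; "image" is an assumed
-  * grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::xfeatures2d::HarrisLaplaceFeatureDetector> harrisLaplace =
-  *         cv::xfeatures2d::HarrisLaplaceFeatureDetector::create();
-  *     std::vector<cv::KeyPoint> keypoints;
-  *     harrisLaplace->detect(image, keypoints);
-  */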
- /**
- * @brief Class implementing affine adaptation for key points.
- *
- * A @ref FeatureDetector and a @ref DescriptorExtractor are wrapped to augment the
- * detected points with their affine invariant elliptic region and to compute
- * the feature descriptors on the regions after warping them into circles.
- *
- * The interface is equivalent to @ref Feature2D, adding operations for
- * @ref Elliptic_KeyPoint "Elliptic_KeyPoints" instead of @ref KeyPoint "KeyPoints".
- */
- class CV_EXPORTS_W AffineFeature2D : public Feature2D
- {
- public:
- /**
- * @brief Creates an instance wrapping the given keypoint detector and
- * descriptor extractor.
- */
- static Ptr<AffineFeature2D> create(
- Ptr<FeatureDetector> keypoint_detector,
- Ptr<DescriptorExtractor> descriptor_extractor);
- /**
- * @brief Creates an instance where keypoint detector and descriptor
- * extractor are identical.
- */
- static Ptr<AffineFeature2D> create(
- Ptr<FeatureDetector> keypoint_detector)
- {
- return create(keypoint_detector, keypoint_detector);
- }
- using Feature2D::detect; // overload, don't hide
- /**
- * @brief Detects keypoints in the image using the wrapped detector and
- * performs affine adaptation to augment them with their elliptic regions.
- */
- virtual void detect(
- InputArray image,
- CV_OUT std::vector<Elliptic_KeyPoint>& keypoints,
- InputArray mask=noArray() ) = 0;
- using Feature2D::detectAndCompute; // overload, don't hide
- /**
- * @brief Detects keypoints and computes descriptors for their surrounding
- * regions, after warping them into circles.
- */
- virtual void detectAndCompute(
- InputArray image,
- InputArray mask,
- CV_OUT std::vector<Elliptic_KeyPoint>& keypoints,
- OutputArray descriptors,
- bool useProvidedKeypoints=false ) = 0;
- };
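- /* Example (illustrative sketch): wrap a detector/extractor pair to obtain affine-adapted
-  * elliptic keypoints and their descriptors; "image" is an assumed grayscale CV_8U cv::Mat.
-  *
-  *     cv::Ptr<cv::xfeatures2d::AffineFeature2D> affine =
-  *         cv::xfeatures2d::AffineFeature2D::create(
-  *             cv::xfeatures2d::HarrisLaplaceFeatureDetector::create(),
-  *             cv::xfeatures2d::DAISY::create());
-  *     std::vector<cv::xfeatures2d::Elliptic_KeyPoint> keypoints;
-  *     cv::Mat descriptors;
-  *     affine->detectAndCompute(image, cv::noArray(), keypoints, descriptors);
-  */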
- /**
- @brief Class implementing the Tree Based Morse Regions (TBMR) as described in
- @cite Najman2014 extended with scaled extraction ability.
- @param min_area prune areas smaller than minArea
- @param max_area_relative prune areas bigger than maxArea = max_area_relative *
- input_image_size
- @param scale_factor scale factor for scaled extraction.
- @param n_scales number of applications of the scale factor (octaves).
- @note This algorithm is based on Component Tree (Min/Max) as well as MSER but
- uses a Morse-theory approach to extract features.
- Features are ellipses (similar to MSER, however a MSER feature can never be a
- TBMR feature and vice versa).
- */
- class CV_EXPORTS_W TBMR : public AffineFeature2D
- {
- public:
- CV_WRAP static Ptr<TBMR> create(int min_area = 60,
- float max_area_relative = 0.01f,
- float scale_factor = 1.25f,
- int n_scales = -1);
- CV_WRAP virtual void setMinArea(int minArea) = 0;
- CV_WRAP virtual int getMinArea() const = 0;
- CV_WRAP virtual void setMaxAreaRelative(float maxArea) = 0;
- CV_WRAP virtual float getMaxAreaRelative() const = 0;
- CV_WRAP virtual void setScaleFactor(float scale_factor) = 0;
- CV_WRAP virtual float getScaleFactor() const = 0;
- CV_WRAP virtual void setNScales(int n_scales) = 0;
- CV_WRAP virtual int getNScales() const = 0;
- };
- /** @brief Estimates cornerness for prespecified KeyPoints using the FAST algorithm
- @param image grayscale image where keypoints (corners) are detected.
- @param keypoints keypoints which should be tested to fit the FAST criteria. Keypoints not being
- detected as corners are removed.
- @param threshold threshold on difference between intensity of the central pixel and pixels of a
- circle around this pixel.
- @param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
- (keypoints).
- @param type one of the three neighborhoods as defined in the paper:
- FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
- FastFeatureDetector::TYPE_5_8
- Detects corners using the FAST algorithm by @cite Rosten06 .
- */
- CV_EXPORTS void FASTForPointSet( InputArray image, CV_IN_OUT std::vector<KeyPoint>& keypoints,
- int threshold, bool nonmaxSuppression=true, cv::FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16);
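- /* Example (illustrative sketch): keep only those candidate keypoints that also satisfy the
-  * FAST corner criterion; "image" is an assumed grayscale CV_8U cv::Mat and "candidates"
-  * is assumed to have been filled by another detector or a tracker.
-  *
-  *     std::vector<cv::KeyPoint> candidates; // filled elsewhere
-  *     cv::xfeatures2d::FASTForPointSet(image, candidates, 20, true,
-  *                                      cv::FastFeatureDetector::TYPE_9_16);
-  *     // "candidates" now contains only the points accepted as FAST corners
-  */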
- //! @}
- //! @addtogroup xfeatures2d_match
- //! @{
- /** @brief GMS (Grid-based Motion Statistics) feature matching strategy described in @cite Bian2017gms .
- @param size1 Input size of image1.
- @param size2 Input size of image2.
- @param keypoints1 Input keypoints of image1.
- @param keypoints2 Input keypoints of image2.
- @param matches1to2 Input 1-nearest neighbor matches.
- @param matchesGMS Matches returned by the GMS matching strategy.
- @param withRotation Take rotation transformation into account.
- @param withScale Take scale transformation into account.
- @param thresholdFactor The higher, the less matches.
- @note
- Since GMS works well when the number of features is large, we recommend using the ORB detector and setting FastThreshold to 0 to get as many features as possible quickly.
- If matching results are not satisfying, please add more features. (We use 10000 for 640 x 480 images.)
- If your images have big rotation and scale changes, please set withRotation or withScale to true.
- */
- CV_EXPORTS_W void matchGMS(const Size& size1, const Size& size2, const std::vector<KeyPoint>& keypoints1, const std::vector<KeyPoint>& keypoints2,
- const std::vector<DMatch>& matches1to2, CV_OUT std::vector<DMatch>& matchesGMS, const bool withRotation = false,
- const bool withScale = false, const double thresholdFactor = 6.0);
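- /* Example (illustrative sketch): ORB plus brute-force Hamming matching filtered by GMS,
-  * following the recommendation above; "img1" and "img2" are assumed grayscale CV_8U cv::Mat inputs.
-  *
-  *     cv::Ptr<cv::ORB> orb = cv::ORB::create(10000);
-  *     orb->setFastThreshold(0);
-  *     std::vector<cv::KeyPoint> kpts1, kpts2;
-  *     cv::Mat desc1, desc2;
-  *     orb->detectAndCompute(img1, cv::noArray(), kpts1, desc1);
-  *     orb->detectAndCompute(img2, cv::noArray(), kpts2, desc2);
-  *     cv::BFMatcher matcher(cv::NORM_HAMMING);
-  *     std::vector<cv::DMatch> matchesAll, matchesGMS;
-  *     matcher.match(desc1, desc2, matchesAll); // 1-nearest-neighbor matches
-  *     cv::xfeatures2d::matchGMS(img1.size(), img2.size(), kpts1, kpts2, matchesAll, matchesGMS);
-  */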
- /** @brief LOGOS (Local geometric support for high-outlier spatial verification) feature matching strategy described in @cite Lowry2018LOGOSLG .
- @param keypoints1 Input keypoints of image1.
- @param keypoints2 Input keypoints of image2.
- @param nn1 Index of the closest BoW centroid for each descriptor of image1.
- @param nn2 Index of the closest BoW centroid for each descriptor of image2.
- @param matches1to2 Matches returned by the LOGOS matching strategy.
- @note
- This matching strategy is suitable for matching features against a large-scale database.
- The first step consists of constructing the bag-of-words (BoW) vocabulary from a representative image database.
- Image descriptors are then represented by their closest codevector (nearest BoW centroid).
- */
- CV_EXPORTS_W void matchLOGOS(const std::vector<KeyPoint>& keypoints1, const std::vector<KeyPoint>& keypoints2,
- const std::vector<int>& nn1, const std::vector<int>& nn2,
- std::vector<DMatch>& matches1to2);
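- /* Example (illustrative sketch): matchLOGOS expects, for every descriptor, the index of its
-  * nearest vocabulary (BoW) centroid. The vocabulary construction and nearest-centroid lookup
-  * are assumed to happen elsewhere (e.g. with cv::BOWKMeansTrainer and a descriptor matcher).
-  *
-  *     std::vector<cv::KeyPoint> kpts1, kpts2; // detected elsewhere
-  *     std::vector<int> nn1, nn2;              // nearest-centroid indexes, computed elsewhere
-  *     std::vector<cv::DMatch> matches;
-  *     cv::xfeatures2d::matchLOGOS(kpts1, kpts2, nn1, nn2, matches);
-  */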
- //! @}
- }
- }
- #endif
|