// (Non-code extraction artifact removed here: a file-size caption and a page line-number gutter that were not part of the original header.)
  1. //
  2. // This file is auto-generated. Please don't modify it!
  3. //
  4. #pragma once
  5. #ifdef __cplusplus
  6. //#import "opencv.hpp"
  7. #import "opencv2/ximgproc.hpp"
  8. #else
  9. #define CV_EXPORTS
  10. #endif
  11. #import <Foundation/Foundation.h>
  12. @class AdaptiveManifoldFilter;
  13. @class ContourFitting;
  14. @class DTFilter;
  15. @class DisparityWLSFilter;
  16. @class EdgeAwareInterpolator;
  17. @class EdgeBoxes;
  18. @class EdgeDrawing;
  19. @class FastBilateralSolverFilter;
  20. @class FastGlobalSmootherFilter;
  21. @class FastLineDetector;
  22. @class GraphSegmentation;
  23. @class GuidedFilter;
  24. @class Int4;
  25. @class Mat;
  26. @class Point2i;
  27. @class RFFeatureGetter;
  28. @class RICInterpolator;
  29. @class Rect2i;
  30. @class ScanSegment;
  31. @class SelectiveSearchSegmentation;
  32. @class SelectiveSearchSegmentationStrategy;
  33. @class SelectiveSearchSegmentationStrategyColor;
  34. @class SelectiveSearchSegmentationStrategyFill;
  35. @class SelectiveSearchSegmentationStrategyMultiple;
  36. @class SelectiveSearchSegmentationStrategySize;
  37. @class SelectiveSearchSegmentationStrategyTexture;
  38. @class StereoMatcher;
  39. @class StructuredEdgeDetection;
  40. @class SuperpixelLSC;
  41. @class SuperpixelSEEDS;
  42. @class SuperpixelSLIC;
  43. // C++: enum AngleRangeOption (cv.ximgproc.AngleRangeOption)
  44. typedef NS_ENUM(int, AngleRangeOption) { // angle sub-range (degrees) of the Hough space computed by the Fast Hough Transform (per OpenCV ximgproc docs)
  45. ARO_0_45 = 0, // [0, 45)
  46. ARO_45_90 = 1, // [45, 90)
  47. ARO_90_135 = 2, // [90, 135)
  48. ARO_315_0 = 3, // [315, 360), i.e. [-45, 0)
  49. ARO_315_45 = 4, // [315, 45) — 90-degree span crossing 0
  50. ARO_45_135 = 5, // [45, 135)
  51. ARO_315_135 = 6, // [315, 135) — full 180-degree span
  52. ARO_CTR_HOR = 7, // range centered on the horizontal direction
  53. ARO_CTR_VER = 8 // range centered on the vertical direction
  54. };
  55. // C++: enum EdgeAwareFiltersList (cv.ximgproc.EdgeAwareFiltersList)
  56. typedef NS_ENUM(int, EdgeAwareFiltersList) { // filter variant accepted by the edge-aware filtering factories (per OpenCV ximgproc docs)
  57. DTF_NC = 0, // Domain Transform filter, Normalized Convolution variant
  58. DTF_IC = 1, // Domain Transform filter, Interpolated Convolution variant
  59. DTF_RF = 2, // Domain Transform filter, Recursive Filtering variant
  60. GUIDED_FILTER = 3, // Guided Filter
  61. AM_FILTER = 4 // Adaptive Manifold filter
  62. };
  63. // C++: enum HoughDeskewOption (cv.ximgproc.HoughDeskewOption)
  64. typedef NS_ENUM(int, HoughDeskewOption) { // whether the Fast Hough Transform result is deskewed
  65. HDO_RAW = 0, // use the raw cyclic image
  66. HDO_DESKEW = 1 // prepare a deskewed image
  67. };
  68. // C++: enum HoughOp (cv.ximgproc.HoughOp)
  69. typedef NS_ENUM(int, HoughOp) { // binary operation combining pixel values along a line in the Fast Hough Transform
  70. FHT_MIN = 0, // minimum
  71. FHT_MAX = 1, // maximum
  72. FHT_ADD = 2, // addition (sum)
  73. FHT_AVE = 3 // average
  74. };
  75. // C++: enum LocalBinarizationMethods (cv.ximgproc.LocalBinarizationMethods)
  76. typedef NS_ENUM(int, LocalBinarizationMethods) { // per-pixel threshold formula used by niBlackThreshold
  77. BINARIZATION_NIBLACK = 0, // classic Niblack binarization (mean - k * stddev)
  78. BINARIZATION_SAUVOLA = 1, // Sauvola's technique (also uses the dynamic-range parameter r)
  79. BINARIZATION_WOLF = 2, // Wolf's technique
  80. BINARIZATION_NICK = 3 // NICK technique
  81. };
  82. // C++: enum SLICType (cv.ximgproc.SLICType)
  83. typedef NS_ENUM(int, SLICType) { // superpixel algorithm variant (per OpenCV ximgproc docs)
  84. SLIC = 100, // SLIC: fixed region size and compactness
  85. SLICO = 101, // SLICO: zero-parameter SLIC with adaptive compactness
  86. MSLIC = 102 // MSLIC: manifold (content-sensitive) SLIC
  87. };
  88. // C++: enum ThinningTypes (cv.ximgproc.ThinningTypes)
  89. typedef NS_ENUM(int, ThinningTypes) { // skeletonization algorithm used by thinning()
  90. THINNING_ZHANGSUEN = 0, // Zhang-Suen thinning
  91. THINNING_GUOHALL = 1 // Guo-Hall thinning
  92. };
  93. // C++: enum WMFWeightType (cv.ximgproc.WMFWeightType)
  94. typedef NS_ENUM(int, WMFWeightType) { // weight kernel for the weighted median filter; values are distinct bit flags (1 << n), mirroring the C++ enum
  95. WMF_EXP = 1, // exponential: exp(-|I1-I2|^2 / (2*sigma^2)) — per OpenCV docs
  96. WMF_IV1 = 1 << 1, // inverse: (|I1-I2| + sigma)^-1 — per OpenCV docs
  97. WMF_IV2 = 1 << 2, // inverse square: (|I1-I2|^2 + sigma^2)^-1 — per OpenCV docs
  98. WMF_COS = 1 << 3, // cosine similarity: dot(I1,I2)/(|I1|*|I2|) — per OpenCV docs
  99. WMF_JAC = 1 << 4, // Jaccard similarity of RGB values — per OpenCV docs
  100. WMF_OFF = 1 << 5 // unweighted
  101. };
  102. NS_ASSUME_NONNULL_BEGIN
  103. // C++: class Ximgproc
  104. /**
  105. * The Ximgproc module
  106. *
  107. * Member classes: `DisparityFilter`, `DisparityWLSFilter`, `ScanSegment`, `DTFilter`, `GuidedFilter`, `AdaptiveManifoldFilter`, `FastBilateralSolverFilter`, `FastGlobalSmootherFilter`, `SuperpixelSLIC`, `RFFeatureGetter`, `StructuredEdgeDetection`, `SuperpixelLSC`, `EdgeBoxes`, `GraphSegmentation`, `SelectiveSearchSegmentationStrategy`, `SelectiveSearchSegmentationStrategyColor`, `SelectiveSearchSegmentationStrategySize`, `SelectiveSearchSegmentationStrategyTexture`, `SelectiveSearchSegmentationStrategyFill`, `SelectiveSearchSegmentationStrategyMultiple`, `SelectiveSearchSegmentation`, `ContourFitting`, `SparseMatchInterpolator`, `EdgeAwareInterpolator`, `RICInterpolator`, `EdgeDrawing`, `EdgeDrawingParams`, `RidgeDetectionFilter`, `SuperpixelSEEDS`, `FastLineDetector`
  108. *
  109. * Member enums: `ThinningTypes`, `LocalBinarizationMethods`, `EdgeAwareFiltersList`, `SLICType`, `WMFWeightType`, `AngleRangeOption`, `HoughOp`, `HoughDeskewOption`, `GradientOperator`
  110. */
  111. CV_EXPORTS @interface Ximgproc : NSObject
  112. #pragma mark - Class Constants
  113. @property (class, readonly) int RO_IGNORE_BORDERS NS_SWIFT_NAME(RO_IGNORE_BORDERS);
  114. @property (class, readonly) int RO_STRICT NS_SWIFT_NAME(RO_STRICT);
  115. #pragma mark - Methods
  116. //
  117. // void cv::ximgproc::niBlackThreshold(Mat _src, Mat& _dst, double maxValue, int type, int blockSize, double k, LocalBinarizationMethods binarizationMethod = BINARIZATION_NIBLACK, double r = 128)
  118. //
  119. /**
  120. * Performs thresholding on input images using Niblack's technique or some of the
  121. * popular variations it inspired.
  122. *
  123. * The function transforms a grayscale image to a binary image according to the formulae:
  124. * - **THRESH_BINARY**
  125. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}$$`
  126. * - **THRESH_BINARY_INV**
  127. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}$$`
  128. * where `$$T(x,y)$$` is a threshold calculated individually for each pixel.
  129. *
  130. * The threshold value `$$T(x, y)$$` is determined based on the binarization method chosen. For
  131. * classic Niblack, it is the mean minus `$$ k $$` times standard deviation of
  132. * `$$\texttt{blockSize} \times\texttt{blockSize}$$` neighborhood of `$$(x, y)$$`.
  133. *
  134. * The function can't process the image in-place.
  135. *
  136. * @param _src Source 8-bit single-channel image.
  137. * @param _dst Destination image of the same size and the same type as src.
  138. * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied,
  139. * used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
  140. * @param type Thresholding type, see cv::ThresholdTypes.
  141. * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value
  142. * for the pixel: 3, 5, 7, and so on.
  143. * @param k The user-adjustable parameter used by Niblack and inspired techniques. For Niblack, this is
  144. * normally a value between 0 and 1 that is multiplied with the standard deviation and subtracted from
  145. * the mean.
  146. * @param binarizationMethod Binarization method to use. By default, Niblack's technique is used.
  147. * Other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
  148. * @param r The user-adjustable parameter used by Sauvola's technique. This is the dynamic range
  149. * of standard deviation.
  150. * @see `threshold`, `adaptiveThreshold`
  151. */
  152. + (void)niBlackThreshold:(Mat*)_src _dst:(Mat*)_dst maxValue:(double)maxValue type:(int)type blockSize:(int)blockSize k:(double)k binarizationMethod:(LocalBinarizationMethods)binarizationMethod r:(double)r NS_SWIFT_NAME(niBlackThreshold(_src:_dst:maxValue:type:blockSize:k:binarizationMethod:r:));
  153. /**
  154. * Performs thresholding on input images using Niblack's technique or some of the
  155. * popular variations it inspired.
  156. *
  157. * The function transforms a grayscale image to a binary image according to the formulae:
  158. * - **THRESH_BINARY**
  159. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}$$`
  160. * - **THRESH_BINARY_INV**
  161. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}$$`
  162. * where `$$T(x,y)$$` is a threshold calculated individually for each pixel.
  163. *
  164. * The threshold value `$$T(x, y)$$` is determined based on the binarization method chosen. For
  165. * classic Niblack, it is the mean minus `$$ k $$` times standard deviation of
  166. * `$$\texttt{blockSize} \times\texttt{blockSize}$$` neighborhood of `$$(x, y)$$`.
  167. *
  168. * The function can't process the image in-place.
  169. *
  170. * @param _src Source 8-bit single-channel image.
  171. * @param _dst Destination image of the same size and the same type as src.
  172. * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied,
  173. * used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
  174. * @param type Thresholding type, see cv::ThresholdTypes.
  175. * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value
  176. * for the pixel: 3, 5, 7, and so on.
  177. * @param k The user-adjustable parameter used by Niblack and inspired techniques. For Niblack, this is
  178. * normally a value between 0 and 1 that is multiplied with the standard deviation and subtracted from
  179. * the mean.
  180. * @param binarizationMethod Binarization method to use. By default, Niblack's technique is used.
  181. * Other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
  182. * The parameter r — the dynamic range of standard deviation used by Sauvola's technique — defaults to 128.
  183. * @see `threshold`, `adaptiveThreshold`
  184. */
  185. + (void)niBlackThreshold:(Mat*)_src _dst:(Mat*)_dst maxValue:(double)maxValue type:(int)type blockSize:(int)blockSize k:(double)k binarizationMethod:(LocalBinarizationMethods)binarizationMethod NS_SWIFT_NAME(niBlackThreshold(_src:_dst:maxValue:type:blockSize:k:binarizationMethod:));
  186. /**
  187. * Performs thresholding on input images using Niblack's technique or some of the
  188. * popular variations it inspired.
  189. *
  190. * The function transforms a grayscale image to a binary image according to the formulae:
  191. * - **THRESH_BINARY**
  192. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}$$`
  193. * - **THRESH_BINARY_INV**
  194. * `$$\newcommand{\fork}[4]{ \left\{ \begin{array}{l l} #1 & \text{#2}\\\\ #3 & \text{#4}\\\\ \end{array} \right.} dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}$$`
  195. * where `$$T(x,y)$$` is a threshold calculated individually for each pixel.
  196. *
  197. * The threshold value `$$T(x, y)$$` is determined based on the binarization method chosen. For
  198. * classic Niblack, it is the mean minus `$$ k $$` times standard deviation of
  199. * `$$\texttt{blockSize} \times\texttt{blockSize}$$` neighborhood of `$$(x, y)$$`.
  200. *
  201. * The function can't process the image in-place.
  202. *
  203. * @param _src Source 8-bit single-channel image.
  204. * @param _dst Destination image of the same size and the same type as src.
  205. * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied,
  206. * used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
  207. * @param type Thresholding type, see cv::ThresholdTypes.
  208. * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value
  209. * for the pixel: 3, 5, 7, and so on.
  210. * @param k The user-adjustable parameter used by Niblack and inspired techniques. For Niblack, this is
  211. * normally a value between 0 and 1 that is multiplied with the standard deviation and subtracted from
  212. * the mean.
  213. * The binarization method defaults to BINARIZATION_NIBLACK; other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
  214. * The Sauvola parameter r (dynamic range of standard deviation) defaults to 128.
  215. * @see `threshold`, `adaptiveThreshold`
  216. */
  217. + (void)niBlackThreshold:(Mat*)_src _dst:(Mat*)_dst maxValue:(double)maxValue type:(int)type blockSize:(int)blockSize k:(double)k NS_SWIFT_NAME(niBlackThreshold(_src:_dst:maxValue:type:blockSize:k:));
  218. //
  219. // void cv::ximgproc::thinning(Mat src, Mat& dst, ThinningTypes thinningType = THINNING_ZHANGSUEN)
  220. //
  221. /**
  222. * Applies a binary blob thinning operation, to achieve a skeletization of the input image.
  223. *
  224. * The function transforms a binary blob image into a skeletized form using the technique of Zhang-Suen.
  225. *
  226. * @param src Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.
  227. * @param dst Destination image of the same size and the same type as src. The function can work in-place.
  228. * @param thinningType Value that defines which thinning algorithm should be used. See cv::ximgproc::ThinningTypes
  229. */
  230. + (void)thinning:(Mat*)src dst:(Mat*)dst thinningType:(ThinningTypes)thinningType NS_SWIFT_NAME(thinning(src:dst:thinningType:));
  231. /**
  232. * Applies a binary blob thinning operation, to achieve a skeletization of the input image.
  233. *
  234. * The function transforms a binary blob image into a skeletized form using the technique of Zhang-Suen.
  235. *
  236. * @param src Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.
  237. * @param dst Destination image of the same size and the same type as src. The function can work in-place.
  238. */
  239. + (void)thinning:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(thinning(src:dst:));
  240. //
  241. // void cv::ximgproc::anisotropicDiffusion(Mat src, Mat& dst, float alpha, float K, int niters)
  242. //
  243. /**
  244. * Performs anisotropic diffusion on an image.
  245. *
  246. * The function applies Perona-Malik anisotropic diffusion to an image. This is the solution to the partial differential equation:
  247. *
  248. * `$${\frac {\partial I}{\partial t}}={\mathrm {div}}\left(c(x,y,t)\nabla I\right)=\nabla c\cdot \nabla I+c(x,y,t)\Delta I$$`
  249. *
  250. * Suggested functions for c(x,y,t) are:
  251. *
  252. * `$$c\left(\|\nabla I\|\right)=e^{{-\left(\|\nabla I\|/K\right)^{2}}}$$`
  253. *
  254. * or
  255. *
  256. * `$$ c\left(\|\nabla I\|\right)={\frac {1}{1+\left({\frac {\|\nabla I\|}{K}}\right)^{2}}} $$`
  257. *
  258. * @param src Source image with 3 channels.
  259. * @param dst Destination image of the same size and the same number of channels as src .
  260. * @param alpha The amount of time to step forward by on each iteration (normally, it's between 0 and 1).
  261. * @param K sensitivity to the edges
  262. * @param niters The number of iterations
  263. */
  264. + (void)anisotropicDiffusion:(Mat*)src dst:(Mat*)dst alpha:(float)alpha K:(float)K niters:(int)niters NS_SWIFT_NAME(anisotropicDiffusion(src:dst:alpha:K:niters:));
  265. //
  266. // void cv::ximgproc::GradientDericheY(Mat op, Mat& dst, double alpha, double omega)
  267. //
  268. /**
  269. * Applies Y Deriche filter to an image.
  270. *
  271. * For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
  272. * @param op Source image.
  273. * @param dst Destination image holding the Y (vertical) Deriche gradient; alpha and omega are the filter's smoothing parameters (see the referenced paper).
  274. */
  275. + (void)GradientDericheY:(Mat*)op dst:(Mat*)dst alpha:(double)alpha omega:(double)omega NS_SWIFT_NAME(GradientDericheY(op:dst:alpha:omega:));
  276. //
  277. // void cv::ximgproc::GradientDericheX(Mat op, Mat& dst, double alpha, double omega)
  278. //
  279. /**
  280. * Applies X Deriche filter to an image.
  281. *
  282. * For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
  283. * @param op Source image.
  284. * @param dst Destination image holding the X (horizontal) Deriche gradient; alpha and omega are the filter's smoothing parameters (see the referenced paper).
  285. */
  286. + (void)GradientDericheX:(Mat*)op dst:(Mat*)dst alpha:(double)alpha omega:(double)omega NS_SWIFT_NAME(GradientDericheX(op:dst:alpha:omega:));
  287. //
  288. // Ptr_DisparityWLSFilter cv::ximgproc::createDisparityWLSFilter(Ptr_StereoMatcher matcher_left)
  289. //
  290. /**
  291. * Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant
  292. * filter parameters automatically based on the matcher instance. Currently supports only StereoBM and StereoSGBM.
  293. *
  294. * @param matcher_left stereo matcher instance that will be used with the filter
  295. */
  296. + (DisparityWLSFilter*)createDisparityWLSFilter:(StereoMatcher*)matcher_left NS_SWIFT_NAME(createDisparityWLSFilter(matcher_left:));
  297. //
  298. // Ptr_StereoMatcher cv::ximgproc::createRightMatcher(Ptr_StereoMatcher matcher_left)
  299. //
  300. /**
  301. * Convenience method to set up the matcher for computing the right-view disparity map
  302. * that is required in case of filtering with confidence.
  303. *
  304. * @param matcher_left main stereo matcher instance that will be used with the filter
  305. */
  306. + (StereoMatcher*)createRightMatcher:(StereoMatcher*)matcher_left NS_SWIFT_NAME(createRightMatcher(matcher_left:));
  307. //
  308. // Ptr_DisparityWLSFilter cv::ximgproc::createDisparityWLSFilterGeneric(bool use_confidence)
  309. //
  310. /**
  311. * More generic factory method, create instance of DisparityWLSFilter and execute basic
  312. * initialization routines. When using this method you will need to set-up the ROI, matchers and
  313. * other parameters by yourself.
  314. *
  315. * @param use_confidence filtering with confidence requires two disparity maps (for the left and right views) and is
  316. * approximately two times slower. However, quality is typically significantly better.
  317. */
  318. + (DisparityWLSFilter*)createDisparityWLSFilterGeneric:(BOOL)use_confidence NS_SWIFT_NAME(createDisparityWLSFilterGeneric(use_confidence:));
  319. //
  320. // int cv::ximgproc::readGT(String src_path, Mat& dst)
  321. //
  322. /**
  323. * Function for reading ground truth disparity maps. Supports basic Middlebury
  324. * and MPI-Sintel formats. Note that the resulting disparity map is scaled by 16.
  325. *
  326. * @param src_path path to the image, containing ground-truth disparity map
  327. *
  328. * @param dst output disparity map, CV_16S depth
  329. *
  330. * @result returns zero if successfully read the ground truth
  331. */
  332. + (int)readGT:(NSString*)src_path dst:(Mat*)dst NS_SWIFT_NAME(readGT(src_path:dst:));
  333. //
  334. // double cv::ximgproc::computeMSE(Mat GT, Mat src, Rect ROI)
  335. //
  336. /**
  337. * Function for computing mean square error for disparity maps
  338. *
  339. * @param GT ground truth disparity map
  340. *
  341. * @param src disparity map to evaluate
  342. *
  343. * @param ROI region of interest
  344. *
  345. * @result returns mean square error between GT and src
  346. */
  347. + (double)computeMSE:(Mat*)GT src:(Mat*)src ROI:(Rect2i*)ROI NS_SWIFT_NAME(computeMSE(GT:src:ROI:));
  348. //
  349. // double cv::ximgproc::computeBadPixelPercent(Mat GT, Mat src, Rect ROI, int thresh = 24)
  350. //
  351. /**
  352. * Function for computing the percent of "bad" pixels in the disparity map
  353. * (pixels where error is higher than a specified threshold)
  354. *
  355. * @param GT ground truth disparity map
  356. *
  357. * @param src disparity map to evaluate
  358. *
  359. * @param ROI region of interest
  360. *
  361. * @param thresh threshold used to determine "bad" pixels
  362. *
  363. * @result returns mean square error between GT and src
  364. */
  365. + (double)computeBadPixelPercent:(Mat*)GT src:(Mat*)src ROI:(Rect2i*)ROI thresh:(int)thresh NS_SWIFT_NAME(computeBadPixelPercent(GT:src:ROI:thresh:));
  366. /**
  367. * Function for computing the percent of "bad" pixels in the disparity map
  368. * (pixels where error is higher than a specified threshold)
  369. *
  370. * @param GT ground truth disparity map
  371. *
  372. * @param src disparity map to evaluate
  373. *
  374. * @param ROI region of interest
  375. *
  376. *
  377. * @result returns mean square error between GT and src
  378. */
  379. + (double)computeBadPixelPercent:(Mat*)GT src:(Mat*)src ROI:(Rect2i*)ROI NS_SWIFT_NAME(computeBadPixelPercent(GT:src:ROI:));
  380. //
  381. // void cv::ximgproc::getDisparityVis(Mat src, Mat& dst, double scale = 1.0)
  382. //
  383. /**
  384. * Function for creating a disparity map visualization (clamped CV_8U image)
  385. *
  386. * @param src input disparity map (CV_16S depth)
  387. *
  388. * @param dst output visualization
  389. *
  390. * @param scale disparity map will be multiplied by this value for visualization
  391. */
  392. + (void)getDisparityVis:(Mat*)src dst:(Mat*)dst scale:(double)scale NS_SWIFT_NAME(getDisparityVis(src:dst:scale:));
  393. /**
  394. * Function for creating a disparity map visualization (clamped CV_8U image)
  395. *
  396. * @param src input disparity map (CV_16S depth)
  397. *
  398. * @param dst output visualization
  399. *
  400. */
  401. + (void)getDisparityVis:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(getDisparityVis(src:dst:));
  402. //
  403. // void cv::ximgproc::edgePreservingFilter(Mat src, Mat& dst, int d, double threshold)
  404. //
  405. /**
  406. * Smoothes an image using the Edge-Preserving filter.
  407. *
  408. * The function smoothes Gaussian noise as well as salt & pepper noise.
  409. * For more details about this implementation, please see
  410. * [ReiWoe18] Reich, S. and Wörgötter, F. and Dellen, B. (2018). A Real-Time Edge-Preserving Denoising Filter. Proceedings of the 13th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP): Visapp, 85-94, 4. DOI: 10.5220/0006509000850094.
  411. *
  412. * @param src Source 8-bit 3-channel image.
  413. * @param dst Destination image of the same size and type as src.
  414. * @param d Diameter of each pixel neighborhood that is used during filtering. Must be greater or equal 3.
  415. * @param threshold Threshold, which distinguishes between noise, outliers, and data.
  416. */
  417. + (void)edgePreservingFilter:(Mat*)src dst:(Mat*)dst d:(int)d threshold:(double)threshold NS_SWIFT_NAME(edgePreservingFilter(src:dst:d:threshold:));
  418. //
  419. // Ptr_ScanSegment cv::ximgproc::createScanSegment(int image_width, int image_height, int num_superpixels, int slices = 8, bool merge_small = true)
  420. //
  421. /**
  422. * Initializes a ScanSegment object.
  423. *
  424. * The function initializes a ScanSegment object for the input image. It stores the parameters of
  425. * the image: image_width and image_height. It also sets the parameters of the F-DBSCAN superpixel
  426. * algorithm, which are: num_superpixels, threads, and merge_small.
  427. *
  428. * @param image_width Image width.
  429. * @param image_height Image height.
  430. * @param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
  431. * due to restrictions (depending on the image size). Use getNumberOfSuperpixels() to
  432. * get the actual number.
  433. * @param slices Number of processing threads for parallelisation. Setting -1 uses the maximum number
  434. * of threads. In practice, four threads is enough for smaller images and eight threads for larger ones.
  435. * @param merge_small merge small segments to give the desired number of superpixels. Processing is
  436. * much faster without merging, but many small segments will be left in the image.
  437. */
  438. + (ScanSegment*)createScanSegment:(int)image_width image_height:(int)image_height num_superpixels:(int)num_superpixels slices:(int)slices merge_small:(BOOL)merge_small NS_SWIFT_NAME(createScanSegment(image_width:image_height:num_superpixels:slices:merge_small:));
  439. /**
  440. * Initializes a ScanSegment object.
  441. *
  442. * The function initializes a ScanSegment object for the input image. It stores the parameters of
  443. * the image: image_width and image_height. It also sets the parameters of the F-DBSCAN superpixel
  444. * algorithm, which are: num_superpixels, threads, and merge_small.
  445. *
  446. * @param image_width Image width.
  447. * @param image_height Image height.
  448. * @param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
  449. * due to restrictions (depending on the image size). Use getNumberOfSuperpixels() to
  450. * get the actual number.
  451. * @param slices Number of processing threads for parallelisation. Setting -1 uses the maximum number
  452. * of threads. In practice, four threads is enough for smaller images and eight threads for larger ones.
  453. * much faster without merging, but many small segments will be left in the image.
  454. */
  455. + (ScanSegment*)createScanSegment:(int)image_width image_height:(int)image_height num_superpixels:(int)num_superpixels slices:(int)slices NS_SWIFT_NAME(createScanSegment(image_width:image_height:num_superpixels:slices:));
  456. /**
  457. * Initializes a ScanSegment object.
  458. *
  459. * The function initializes a ScanSegment object for the input image. It stores the parameters of
  460. * the image: image_width and image_height. It also sets the parameters of the F-DBSCAN superpixel
  461. * algorithm, which are: num_superpixels, threads, and merge_small.
  462. *
  463. * @param image_width Image width.
  464. * @param image_height Image height.
  465. * @param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
  466. * due to restrictions (depending on the image size). Use getNumberOfSuperpixels() to
  467. * get the actual number.
  468. * of threads. In practice, four threads is enough for smaller images and eight threads for larger ones.
  469. * much faster without merging, but many small segments will be left in the image.
  470. */
  471. + (ScanSegment*)createScanSegment:(int)image_width image_height:(int)image_height num_superpixels:(int)num_superpixels NS_SWIFT_NAME(createScanSegment(image_width:image_height:num_superpixels:));
  472. //
  473. // Ptr_DTFilter cv::ximgproc::createDTFilter(Mat guide, double sigmaSpatial, double sigmaColor, EdgeAwareFiltersList mode = DTF_NC, int numIters = 3)
  474. //
  475. /**
  476. * Factory method, create instance of DTFilter and produce initialization routines.
  477. *
  478. * @param guide guided image (used to build transformed distance, which describes edge structure of
  479. * guided image).
  480. *
  481. * @param sigmaSpatial `$${\sigma}_H$$` parameter in the original article, it's similar to the sigma in the
  482. * coordinate space into bilateralFilter.
  483. *
  484. * @param sigmaColor `$${\sigma}_r$$` parameter in the original article, it's similar to the sigma in the
  485. * color space into bilateralFilter.
  486. *
  487. * @param mode one form three modes DTF_NC, DTF_RF and DTF_IC which corresponds to three modes for
  488. * filtering 2D signals in the article.
  489. *
  490. * @param numIters optional number of iterations used for filtering, 3 is quite enough.
  491. *
  492. * For more details about Domain Transform filter parameters, see the original article CITE: Gastal11 and
  493. * [Domain Transform filter homepage](http://www.inf.ufrgs.br/~eslgastal/DomainTransform/).
  494. */
  495. + (DTFilter*)createDTFilter:(Mat*)guide sigmaSpatial:(double)sigmaSpatial sigmaColor:(double)sigmaColor mode:(EdgeAwareFiltersList)mode numIters:(int)numIters NS_SWIFT_NAME(createDTFilter(guide:sigmaSpatial:sigmaColor:mode:numIters:));
  496. /**
  497. * Factory method, create instance of DTFilter and produce initialization routines.
  498. *
  499. * @param guide guided image (used to build transformed distance, which describes edge structure of
  500. * guided image).
  501. *
  502. * @param sigmaSpatial `$${\sigma}_H$$` parameter in the original article, it's similar to the sigma in the
  503. * coordinate space into bilateralFilter.
  504. *
  505. * @param sigmaColor `$${\sigma}_r$$` parameter in the original article, it's similar to the sigma in the
  506. * color space into bilateralFilter.
  507. *
  508. * @param mode one form three modes DTF_NC, DTF_RF and DTF_IC which corresponds to three modes for
  509. * filtering 2D signals in the article.
  510. *
  511. *
  512. * For more details about Domain Transform filter parameters, see the original article CITE: Gastal11 and
  513. * [Domain Transform filter homepage](http://www.inf.ufrgs.br/~eslgastal/DomainTransform/).
  514. */
  515. + (DTFilter*)createDTFilter:(Mat*)guide sigmaSpatial:(double)sigmaSpatial sigmaColor:(double)sigmaColor mode:(EdgeAwareFiltersList)mode NS_SWIFT_NAME(createDTFilter(guide:sigmaSpatial:sigmaColor:mode:));
  516. /**
  517. * Factory method, create instance of DTFilter and produce initialization routines.
  518. *
  519. * @param guide guided image (used to build transformed distance, which describes edge structure of
  520. * guided image).
  521. *
  522. * @param sigmaSpatial `$${\sigma}_H$$` parameter in the original article, it's similar to the sigma in the
  523. * coordinate space into bilateralFilter.
  524. *
  525. * @param sigmaColor `$${\sigma}_r$$` parameter in the original article, it's similar to the sigma in the
  526. * color space into bilateralFilter.
  527. *
  528. * filtering 2D signals in the article.
  529. *
  530. *
  531. * For more details about Domain Transform filter parameters, see the original article CITE: Gastal11 and
  532. * [Domain Transform filter homepage](http://www.inf.ufrgs.br/~eslgastal/DomainTransform/).
  533. */
  534. + (DTFilter*)createDTFilter:(Mat*)guide sigmaSpatial:(double)sigmaSpatial sigmaColor:(double)sigmaColor NS_SWIFT_NAME(createDTFilter(guide:sigmaSpatial:sigmaColor:));
  535. //
  536. // void cv::ximgproc::dtFilter(Mat guide, Mat src, Mat& dst, double sigmaSpatial, double sigmaColor, EdgeAwareFiltersList mode = DTF_NC, int numIters = 3)
  537. //
  538. /**
  539. * Simple one-line Domain Transform filter call. If you have multiple images to filter with the same
  540. * guided image then use DTFilter interface to avoid extra computations on initialization stage.
  541. *
  542. * @param guide guided image (also called as joint image) with unsigned 8-bit or floating-point 32-bit
  543. * depth and up to 4 channels.
  544. * @param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
  545. * @param dst destination image
  546. * @param sigmaSpatial `$${\sigma}_H$$` parameter in the original article, it's similar to the sigma in the
  547. * coordinate space into bilateralFilter.
  548. * @param sigmaColor `$${\sigma}_r$$` parameter in the original article, it's similar to the sigma in the
  549. * color space into bilateralFilter.
  550. * @param mode one form three modes DTF_NC, DTF_RF and DTF_IC which corresponds to three modes for
  551. * filtering 2D signals in the article.
  552. * @param numIters optional number of iterations used for filtering, 3 is quite enough.
  553. * @see `bilateralFilter`, `+guidedFilter:src:dst:radius:eps:dDepth:`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  554. */
  555. + (void)dtFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst sigmaSpatial:(double)sigmaSpatial sigmaColor:(double)sigmaColor mode:(EdgeAwareFiltersList)mode numIters:(int)numIters NS_SWIFT_NAME(dtFilter(guide:src:dst:sigmaSpatial:sigmaColor:mode:numIters:));
  556. /**
  557. * Simple one-line Domain Transform filter call. If you have multiple images to filter with the same
  558. * guided image then use DTFilter interface to avoid extra computations on initialization stage.
  559. *
  560. * @param guide guided image (also called as joint image) with unsigned 8-bit or floating-point 32-bit
  561. * depth and up to 4 channels.
  562. * @param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
  563. * @param dst destination image
  564. * @param sigmaSpatial `$${\sigma}_H$$` parameter in the original article, it's similar to the sigma in the
  565. * coordinate space into bilateralFilter.
  566. * @param sigmaColor `$${\sigma}_r$$` parameter in the original article, it's similar to the sigma in the
  567. * color space into bilateralFilter.
  568. * @param mode one form three modes DTF_NC, DTF_RF and DTF_IC which corresponds to three modes for
  569. * filtering 2D signals in the article.
  570. * @see `bilateralFilter`, `+guidedFilter:src:dst:radius:eps:dDepth:`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  571. */
  572. + (void)dtFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst sigmaSpatial:(double)sigmaSpatial sigmaColor:(double)sigmaColor mode:(EdgeAwareFiltersList)mode NS_SWIFT_NAME(dtFilter(guide:src:dst:sigmaSpatial:sigmaColor:mode:));
  573. /**
  574. * Simple one-line Domain Transform filter call. If you have multiple images to filter with the same
  575. * guided image then use DTFilter interface to avoid extra computations on initialization stage.
  576. *
  577. * @param guide guided image (also called as joint image) with unsigned 8-bit or floating-point 32-bit
  578. * depth and up to 4 channels.
  579. * @param src filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
  580. * @param dst destination image
  581. * @param sigmaSpatial `$${\sigma}_H$$` parameter in the original article, it's similar to the sigma in the
  582. * coordinate space into bilateralFilter.
  583. * @param sigmaColor `$${\sigma}_r$$` parameter in the original article, it's similar to the sigma in the
  584. * color space into bilateralFilter.
  585. * filtering 2D signals in the article.
  586. * @see `bilateralFilter`, `+guidedFilter:src:dst:radius:eps:dDepth:`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  587. */
  588. + (void)dtFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst sigmaSpatial:(double)sigmaSpatial sigmaColor:(double)sigmaColor NS_SWIFT_NAME(dtFilter(guide:src:dst:sigmaSpatial:sigmaColor:));
  589. //
  590. // Ptr_GuidedFilter cv::ximgproc::createGuidedFilter(Mat guide, int radius, double eps)
  591. //
  592. /**
  593. * Factory method, create instance of GuidedFilter and produce initialization routines.
  594. *
  595. * @param guide guided image (or array of images) with up to 3 channels, if it have more then 3
  596. * channels then only first 3 channels will be used.
  597. *
  598. * @param radius radius of Guided Filter.
  599. *
  600. * @param eps regularization term of Guided Filter. `$${eps}^2$$` is similar to the sigma in the color
  601. * space into bilateralFilter.
  602. *
  603. * For more details about Guided Filter parameters, see the original article CITE: Kaiming10 .
  604. */
  605. + (GuidedFilter*)createGuidedFilter:(Mat*)guide radius:(int)radius eps:(double)eps NS_SWIFT_NAME(createGuidedFilter(guide:radius:eps:));
  606. //
  607. // void cv::ximgproc::guidedFilter(Mat guide, Mat src, Mat& dst, int radius, double eps, int dDepth = -1)
  608. //
  609. /**
  610. * Simple one-line Guided Filter call.
  611. *
  612. * If you have multiple images to filter with the same guided image then use GuidedFilter interface to
  613. * avoid extra computations on initialization stage.
  614. *
  615. * @param guide guided image (or array of images) with up to 3 channels, if it have more then 3
  616. * channels then only first 3 channels will be used.
  617. *
  618. * @param src filtering image with any numbers of channels.
  619. *
  620. * @param dst output image.
  621. *
  622. * @param radius radius of Guided Filter.
  623. *
  624. * @param eps regularization term of Guided Filter. `$${eps}^2$$` is similar to the sigma in the color
  625. * space into bilateralFilter.
  626. *
  627. * @param dDepth optional depth of the output image.
  628. *
  629. * @see `bilateralFilter`, `+dtFilter:src:dst:sigmaSpatial:sigmaColor:mode:numIters:`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  630. */
  631. + (void)guidedFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst radius:(int)radius eps:(double)eps dDepth:(int)dDepth NS_SWIFT_NAME(guidedFilter(guide:src:dst:radius:eps:dDepth:));
  632. /**
  633. * Simple one-line Guided Filter call.
  634. *
  635. * If you have multiple images to filter with the same guided image then use GuidedFilter interface to
  636. * avoid extra computations on initialization stage.
  637. *
  638. * @param guide guided image (or array of images) with up to 3 channels, if it have more then 3
  639. * channels then only first 3 channels will be used.
  640. *
  641. * @param src filtering image with any numbers of channels.
  642. *
  643. * @param dst output image.
  644. *
  645. * @param radius radius of Guided Filter.
  646. *
  647. * @param eps regularization term of Guided Filter. `$${eps}^2$$` is similar to the sigma in the color
  648. * space into bilateralFilter.
  649. *
  650. *
  651. * @see `bilateralFilter`, `+dtFilter:src:dst:sigmaSpatial:sigmaColor:mode:numIters:`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  652. */
  653. + (void)guidedFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst radius:(int)radius eps:(double)eps NS_SWIFT_NAME(guidedFilter(guide:src:dst:radius:eps:));
  654. //
  655. // Ptr_AdaptiveManifoldFilter cv::ximgproc::createAMFilter(double sigma_s, double sigma_r, bool adjust_outliers = false)
  656. //
  657. /**
  658. * Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.
  659. *
  660. * @param sigma_s spatial standard deviation.
  661. *
  662. * @param sigma_r color space standard deviation, it is similar to the sigma in the color space into
  663. * bilateralFilter.
  664. *
  665. * @param adjust_outliers optional, specify perform outliers adjust operation or not, (Eq. 9) in the
  666. * original paper.
  667. *
  668. * For more details about Adaptive Manifold Filter parameters, see the original article CITE: Gastal12 .
  669. *
  670. * NOTE: Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1]
  671. * color range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same
  672. * sigmas in bilateralFilter and dtFilter functions.
  673. */
  674. + (AdaptiveManifoldFilter*)createAMFilter:(double)sigma_s sigma_r:(double)sigma_r adjust_outliers:(BOOL)adjust_outliers NS_SWIFT_NAME(createAMFilter(sigma_s:sigma_r:adjust_outliers:));
  675. /**
  676. * Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.
  677. *
  678. * @param sigma_s spatial standard deviation.
  679. *
  680. * @param sigma_r color space standard deviation, it is similar to the sigma in the color space into
  681. * bilateralFilter.
  682. *
  683. * original paper.
  684. *
  685. * For more details about Adaptive Manifold Filter parameters, see the original article CITE: Gastal12 .
  686. *
  687. * NOTE: Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1]
  688. * color range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same
  689. * sigmas in bilateralFilter and dtFilter functions.
  690. */
  691. + (AdaptiveManifoldFilter*)createAMFilter:(double)sigma_s sigma_r:(double)sigma_r NS_SWIFT_NAME(createAMFilter(sigma_s:sigma_r:));
  692. //
  693. // void cv::ximgproc::amFilter(Mat joint, Mat src, Mat& dst, double sigma_s, double sigma_r, bool adjust_outliers = false)
  694. //
  695. /**
  696. * Simple one-line Adaptive Manifold Filter call.
  697. *
  698. * @param joint joint (also called as guided) image or array of images with any numbers of channels.
  699. *
  700. * @param src filtering image with any numbers of channels.
  701. *
  702. * @param dst output image.
  703. *
  704. * @param sigma_s spatial standard deviation.
  705. *
  706. * @param sigma_r color space standard deviation, it is similar to the sigma in the color space into
  707. * bilateralFilter.
  708. *
  709. * @param adjust_outliers optional, specify perform outliers adjust operation or not, (Eq. 9) in the
  710. * original paper.
  711. *
  712. * NOTE: Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1]
  713. * color range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same
  714. * sigmas in bilateralFilter and dtFilter functions. @see `bilateralFilter`, `+dtFilter:src:dst:sigmaSpatial:sigmaColor:mode:numIters:`, `+guidedFilter:src:dst:radius:eps:dDepth:`
  715. */
  716. + (void)amFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst sigma_s:(double)sigma_s sigma_r:(double)sigma_r adjust_outliers:(BOOL)adjust_outliers NS_SWIFT_NAME(amFilter(joint:src:dst:sigma_s:sigma_r:adjust_outliers:));
  717. /**
  718. * Simple one-line Adaptive Manifold Filter call.
  719. *
  720. * @param joint joint (also called as guided) image or array of images with any numbers of channels.
  721. *
  722. * @param src filtering image with any numbers of channels.
  723. *
  724. * @param dst output image.
  725. *
  726. * @param sigma_s spatial standard deviation.
  727. *
  728. * @param sigma_r color space standard deviation, it is similar to the sigma in the color space into
  729. * bilateralFilter.
  730. *
  731. * original paper.
  732. *
  733. * NOTE: Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1]
  734. * color range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same
  735. * sigmas in bilateralFilter and dtFilter functions. @see `bilateralFilter`, `+dtFilter:src:dst:sigmaSpatial:sigmaColor:mode:numIters:`, `+guidedFilter:src:dst:radius:eps:dDepth:`
  736. */
  737. + (void)amFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst sigma_s:(double)sigma_s sigma_r:(double)sigma_r NS_SWIFT_NAME(amFilter(joint:src:dst:sigma_s:sigma_r:));
  738. //
  739. // void cv::ximgproc::jointBilateralFilter(Mat joint, Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT)
  740. //
  741. /**
  742. * Applies the joint bilateral filter to an image.
  743. *
  744. * @param joint Joint 8-bit or floating-point, 1-channel or 3-channel image.
  745. *
  746. * @param src Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint
  747. * image.
  748. *
  749. * @param dst Destination image of the same size and type as src .
  750. *
  751. * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
  752. * it is computed from sigmaSpace .
  753. *
  754. * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
  755. * farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
  756. * larger areas of semi-equal color.
  757. *
  758. * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
  759. * farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
  760. * When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
  761. * proportional to sigmaSpace .
  762. *
  763. * @param borderType
  764. *
  765. * NOTE: bilateralFilter and jointBilateralFilter use L1 norm to compute difference between colors.
  766. *
  767. * @see `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  768. */
  769. + (void)jointBilateralFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor sigmaSpace:(double)sigmaSpace borderType:(int)borderType NS_SWIFT_NAME(jointBilateralFilter(joint:src:dst:d:sigmaColor:sigmaSpace:borderType:));
  770. /**
  771. * Applies the joint bilateral filter to an image.
  772. *
  773. * @param joint Joint 8-bit or floating-point, 1-channel or 3-channel image.
  774. *
  775. * @param src Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint
  776. * image.
  777. *
  778. * @param dst Destination image of the same size and type as src .
  779. *
  780. * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
  781. * it is computed from sigmaSpace .
  782. *
  783. * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
  784. * farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
  785. * larger areas of semi-equal color.
  786. *
  787. * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
  788. * farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
  789. * When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
  790. * proportional to sigmaSpace .
  791. *
  792. *
  793. * NOTE: bilateralFilter and jointBilateralFilter use L1 norm to compute difference between colors.
  794. *
  795. * @see `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  796. */
  797. + (void)jointBilateralFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor sigmaSpace:(double)sigmaSpace NS_SWIFT_NAME(jointBilateralFilter(joint:src:dst:d:sigmaColor:sigmaSpace:));
  798. //
  799. // void cv::ximgproc::bilateralTextureFilter(Mat src, Mat& dst, int fr = 3, int numIter = 1, double sigmaAlpha = -1., double sigmaAvg = -1.)
  800. //
  801. /**
  802. * Applies the bilateral texture filter to an image. It performs structure-preserving texture filter.
  803. * For more details about this filter see CITE: Cho2014.
  804. *
  805. * @param src Source image whose depth is 8-bit UINT or 32-bit FLOAT
  806. *
  807. * @param dst Destination image of the same size and type as src.
  808. *
  809. * @param fr Radius of kernel to be used for filtering. It should be positive integer
  810. *
  811. * @param numIter Number of iterations of algorithm, It should be positive integer
  812. *
  813. * @param sigmaAlpha Controls the sharpness of the weight transition from edges to smooth/texture regions, where
  814. * a bigger value means sharper transition. When the value is negative, it is automatically calculated.
  815. *
  816. * @param sigmaAvg Range blur parameter for texture blurring. Larger value makes result to be more blurred. When the
  817. * value is negative, it is automatically calculated as described in the paper.
  818. *
  819. * @see `+rollingGuidanceFilter:dst:d:sigmaColor:sigmaSpace:numOfIter:borderType:`, `bilateralFilter`
  820. */
  821. + (void)bilateralTextureFilter:(Mat*)src dst:(Mat*)dst fr:(int)fr numIter:(int)numIter sigmaAlpha:(double)sigmaAlpha sigmaAvg:(double)sigmaAvg NS_SWIFT_NAME(bilateralTextureFilter(src:dst:fr:numIter:sigmaAlpha:sigmaAvg:));
  822. /**
  823. * Applies the bilateral texture filter to an image. It performs structure-preserving texture filter.
  824. * For more details about this filter see CITE: Cho2014.
  825. *
  826. * @param src Source image whose depth is 8-bit UINT or 32-bit FLOAT
  827. *
  828. * @param dst Destination image of the same size and type as src.
  829. *
  830. * @param fr Radius of kernel to be used for filtering. It should be positive integer
  831. *
  832. * @param numIter Number of iterations of algorithm, It should be positive integer
  833. *
  834. * @param sigmaAlpha Controls the sharpness of the weight transition from edges to smooth/texture regions, where
  835. * a bigger value means sharper transition. When the value is negative, it is automatically calculated.
  836. *
  837. * value is negative, it is automatically calculated as described in the paper.
  838. *
  839. * @see `+rollingGuidanceFilter:dst:d:sigmaColor:sigmaSpace:numOfIter:borderType:`, `bilateralFilter`
  840. */
  841. + (void)bilateralTextureFilter:(Mat*)src dst:(Mat*)dst fr:(int)fr numIter:(int)numIter sigmaAlpha:(double)sigmaAlpha NS_SWIFT_NAME(bilateralTextureFilter(src:dst:fr:numIter:sigmaAlpha:));
  842. /**
  843. * Applies the bilateral texture filter to an image. It performs structure-preserving texture filter.
  844. * For more details about this filter see CITE: Cho2014.
  845. *
  846. * @param src Source image whose depth is 8-bit UINT or 32-bit FLOAT
  847. *
  848. * @param dst Destination image of the same size and type as src.
  849. *
  850. * @param fr Radius of kernel to be used for filtering. It should be positive integer
  851. *
  852. * @param numIter Number of iterations of algorithm, It should be positive integer
  853. *
  854. * a bigger value means sharper transition. When the value is negative, it is automatically calculated.
  855. *
  856. * value is negative, it is automatically calculated as described in the paper.
  857. *
  858. * @see `+rollingGuidanceFilter:dst:d:sigmaColor:sigmaSpace:numOfIter:borderType:`, `bilateralFilter`
  859. */
  860. + (void)bilateralTextureFilter:(Mat*)src dst:(Mat*)dst fr:(int)fr numIter:(int)numIter NS_SWIFT_NAME(bilateralTextureFilter(src:dst:fr:numIter:));
  861. /**
  862. * Applies the bilateral texture filter to an image. It performs structure-preserving texture filter.
  863. * For more details about this filter see CITE: Cho2014.
  864. *
  865. * @param src Source image whose depth is 8-bit UINT or 32-bit FLOAT
  866. *
  867. * @param dst Destination image of the same size and type as src.
  868. *
  869. * @param fr Radius of kernel to be used for filtering. It should be positive integer
  870. *
  871. *
  872. * a bigger value means sharper transition. When the value is negative, it is automatically calculated.
  873. *
  874. * value is negative, it is automatically calculated as described in the paper.
  875. *
  876. * @see `+rollingGuidanceFilter:dst:d:sigmaColor:sigmaSpace:numOfIter:borderType:`, `bilateralFilter`
  877. */
  878. + (void)bilateralTextureFilter:(Mat*)src dst:(Mat*)dst fr:(int)fr NS_SWIFT_NAME(bilateralTextureFilter(src:dst:fr:));
  879. /**
  880. * Applies the bilateral texture filter to an image. It performs structure-preserving texture filter.
  881. * For more details about this filter see CITE: Cho2014.
  882. *
  883. * @param src Source image whose depth is 8-bit UINT or 32-bit FLOAT
  884. *
  885. * @param dst Destination image of the same size and type as src.
  886. *
  887. *
  888. *
  889. * a bigger value means sharper transition. When the value is negative, it is automatically calculated.
  890. *
  891. * value is negative, it is automatically calculated as described in the paper.
  892. *
  893. * @see `+rollingGuidanceFilter:dst:d:sigmaColor:sigmaSpace:numOfIter:borderType:`, `bilateralFilter`
  894. */
  895. + (void)bilateralTextureFilter:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(bilateralTextureFilter(src:dst:));
  896. //
  897. // void cv::ximgproc::rollingGuidanceFilter(Mat src, Mat& dst, int d = -1, double sigmaColor = 25, double sigmaSpace = 3, int numOfIter = 4, int borderType = BORDER_DEFAULT)
  898. //
  899. /**
  900. * Applies the rolling guidance filter to an image.
  901. *
  902. * For more details, please see CITE: zhang2014rolling
  903. *
  904. * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
  905. *
  906. * @param dst Destination image of the same size and type as src.
  907. *
  908. * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
  909. * it is computed from sigmaSpace .
  910. *
  911. * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
  912. * farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
  913. * larger areas of semi-equal color.
  914. *
  915. * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
  916. * farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
  917. * When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
  918. * proportional to sigmaSpace .
  919. *
  920. * @param numOfIter Number of iterations of joint edge-preserving filtering applied on the source image.
  921. *
  922. * @param borderType
  923. *
  924. * NOTE: rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
  925. *
  926. * @see `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`, `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  927. */
  928. + (void)rollingGuidanceFilter:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor sigmaSpace:(double)sigmaSpace numOfIter:(int)numOfIter borderType:(int)borderType NS_SWIFT_NAME(rollingGuidanceFilter(src:dst:d:sigmaColor:sigmaSpace:numOfIter:borderType:));
  929. /**
  930. * Applies the rolling guidance filter to an image.
  931. *
  932. * For more details, please see CITE: zhang2014rolling
  933. *
  934. * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
  935. *
  936. * @param dst Destination image of the same size and type as src.
  937. *
  938. * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
  939. * it is computed from sigmaSpace .
  940. *
  941. * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
  942. * farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
  943. * larger areas of semi-equal color.
  944. *
  945. * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
  946. * farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
  947. * When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
  948. * proportional to sigmaSpace .
  949. *
  950. * @param numOfIter Number of iterations of joint edge-preserving filtering applied on the source image.
  951. *
  952. *
  953. * NOTE: rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
  954. *
  955. * @see `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`, `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
  956. */
  957. + (void)rollingGuidanceFilter:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor sigmaSpace:(double)sigmaSpace numOfIter:(int)numOfIter NS_SWIFT_NAME(rollingGuidanceFilter(src:dst:d:sigmaColor:sigmaSpace:numOfIter:));
/**
 * Applies the rolling guidance filter to an image.
 *
 * For more details, please see CITE: zhang2014rolling
 *
 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
 * @param dst Destination image of the same size and type as src.
 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
 * it is computed from sigmaSpace .
 * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
 * farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
 * larger areas of semi-equal color.
 * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
 * farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
 * When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
 * proportional to sigmaSpace .
 *
 * The omitted numOfIter and borderType parameters use their OpenCV default values (see the C++
 * `cv::ximgproc::rollingGuidanceFilter` declaration).
 *
 * NOTE: rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
 *
 * @see `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`, `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
 */
+ (void)rollingGuidanceFilter:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor sigmaSpace:(double)sigmaSpace NS_SWIFT_NAME(rollingGuidanceFilter(src:dst:d:sigmaColor:sigmaSpace:));
/**
 * Applies the rolling guidance filter to an image.
 *
 * For more details, please see CITE: zhang2014rolling
 *
 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
 * @param dst Destination image of the same size and type as src.
 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
 * it is computed from sigmaSpace .
 * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
 * farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
 * larger areas of semi-equal color.
 *
 * The omitted sigmaSpace parameter (filter sigma in the coordinate space: a larger value means that
 * farther pixels will influence each other as long as their colors are close enough — see sigmaColor;
 * when d\>0 it specifies the neighborhood size regardless of sigmaSpace, otherwise d is proportional
 * to sigmaSpace), as well as numOfIter and borderType, use their OpenCV default values (see the C++
 * `cv::ximgproc::rollingGuidanceFilter` declaration).
 *
 * NOTE: rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
 *
 * @see `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`, `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
 */
+ (void)rollingGuidanceFilter:(Mat*)src dst:(Mat*)dst d:(int)d sigmaColor:(double)sigmaColor NS_SWIFT_NAME(rollingGuidanceFilter(src:dst:d:sigmaColor:));
/**
 * Applies the rolling guidance filter to an image.
 *
 * For more details, please see CITE: zhang2014rolling
 *
 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
 * @param dst Destination image of the same size and type as src.
 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
 * it is computed from sigmaSpace .
 *
 * The omitted sigmaColor parameter (filter sigma in the color space: a larger value means that
 * farther colors within the pixel neighborhood — see sigmaSpace — will be mixed together, resulting
 * in larger areas of semi-equal color) and the omitted sigmaSpace parameter (filter sigma in the
 * coordinate space: a larger value means that farther pixels will influence each other as long as
 * their colors are close enough — see sigmaColor; when d\>0 it specifies the neighborhood size
 * regardless of sigmaSpace, otherwise d is proportional to sigmaSpace), as well as numOfIter and
 * borderType, use their OpenCV default values (see the C++ `cv::ximgproc::rollingGuidanceFilter`
 * declaration).
 *
 * NOTE: rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
 *
 * @see `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`, `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
 */
+ (void)rollingGuidanceFilter:(Mat*)src dst:(Mat*)dst d:(int)d NS_SWIFT_NAME(rollingGuidanceFilter(src:dst:d:));
/**
 * Applies the rolling guidance filter to an image.
 *
 * For more details, please see CITE: zhang2014rolling
 *
 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
 * @param dst Destination image of the same size and type as src.
 *
 * All remaining parameters use their OpenCV default values (see the C++
 * `cv::ximgproc::rollingGuidanceFilter` declaration): d — the neighborhood diameter, computed from
 * sigmaSpace when non-positive; sigmaColor — filter sigma in the color space (larger values mix
 * farther colors within the neighborhood together, producing larger areas of semi-equal color);
 * sigmaSpace — filter sigma in the coordinate space (larger values let farther pixels influence each
 * other as long as their colors are close enough; when d\>0 it specifies the neighborhood size
 * regardless of sigmaSpace); numOfIter and borderType.
 *
 * NOTE: rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
 *
 * @see `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`, `bilateralFilter`, `+amFilter:src:dst:sigma_s:sigma_r:adjust_outliers:`
 */
+ (void)rollingGuidanceFilter:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(rollingGuidanceFilter(src:dst:));
  1064. //
  1065. // Ptr_FastBilateralSolverFilter cv::ximgproc::createFastBilateralSolverFilter(Mat guide, double sigma_spatial, double sigma_luma, double sigma_chroma, double lambda = 128.0, int num_iter = 25, double max_tol = 1e-5)
  1066. //
/**
 * Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 * @param lambda smoothness strength parameter for solver.
 * @param num_iter number of iterations used for solver, 25 is usually enough.
 * @param max_tol convergence tolerance used for solver.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 */
+ (FastBilateralSolverFilter*)createFastBilateralSolverFilter:(Mat*)guide sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma lambda:(double)lambda num_iter:(int)num_iter max_tol:(double)max_tol NS_SWIFT_NAME(createFastBilateralSolverFilter(guide:sigma_spatial:sigma_luma:sigma_chroma:lambda:num_iter:max_tol:));
/**
 * Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 * @param lambda smoothness strength parameter for solver.
 * @param num_iter number of iterations used for solver, 25 is usually enough.
 *
 * The omitted max_tol parameter (solver convergence tolerance) defaults to 1e-5.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 */
+ (FastBilateralSolverFilter*)createFastBilateralSolverFilter:(Mat*)guide sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma lambda:(double)lambda num_iter:(int)num_iter NS_SWIFT_NAME(createFastBilateralSolverFilter(guide:sigma_spatial:sigma_luma:sigma_chroma:lambda:num_iter:));
/**
 * Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 * @param lambda smoothness strength parameter for solver.
 *
 * The omitted num_iter (solver iterations) and max_tol (convergence tolerance) parameters default
 * to 25 and 1e-5 respectively.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 */
+ (FastBilateralSolverFilter*)createFastBilateralSolverFilter:(Mat*)guide sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma lambda:(double)lambda NS_SWIFT_NAME(createFastBilateralSolverFilter(guide:sigma_spatial:sigma_luma:sigma_chroma:lambda:));
/**
 * Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 *
 * The omitted lambda (smoothness strength), num_iter (solver iterations) and max_tol (convergence
 * tolerance) parameters default to 128.0, 25 and 1e-5 respectively.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 */
+ (FastBilateralSolverFilter*)createFastBilateralSolverFilter:(Mat*)guide sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma NS_SWIFT_NAME(createFastBilateralSolverFilter(guide:sigma_spatial:sigma_luma:sigma_chroma:));
  1141. //
  1142. // void cv::ximgproc::fastBilateralSolverFilter(Mat guide, Mat src, Mat confidence, Mat& dst, double sigma_spatial = 8, double sigma_luma = 8, double sigma_chroma = 8, double lambda = 128.0, int num_iter = 25, double max_tol = 1e-5)
  1143. //
/**
 * Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same
 * guide then use FastBilateralSolverFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
 * @param dst destination image.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 * @param lambda smoothness strength parameter for solver.
 * @param num_iter number of iterations used for solver, 25 is usually enough.
 * @param max_tol convergence tolerance used for solver.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 *
 * NOTE: Confidence images with CV_8U depth are expected to be in [0, 255] and CV_32F in [0, 1] range.
 */
+ (void)fastBilateralSolverFilter:(Mat*)guide src:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma lambda:(double)lambda num_iter:(int)num_iter max_tol:(double)max_tol NS_SWIFT_NAME(fastBilateralSolverFilter(guide:src:confidence:dst:sigma_spatial:sigma_luma:sigma_chroma:lambda:num_iter:max_tol:));
/**
 * Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same
 * guide then use FastBilateralSolverFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
 * @param dst destination image.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 * @param lambda smoothness strength parameter for solver.
 * @param num_iter number of iterations used for solver, 25 is usually enough.
 *
 * The omitted max_tol parameter (solver convergence tolerance) defaults to 1e-5.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 *
 * NOTE: Confidence images with CV_8U depth are expected to be in [0, 255] and CV_32F in [0, 1] range.
 */
+ (void)fastBilateralSolverFilter:(Mat*)guide src:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma lambda:(double)lambda num_iter:(int)num_iter NS_SWIFT_NAME(fastBilateralSolverFilter(guide:src:confidence:dst:sigma_spatial:sigma_luma:sigma_chroma:lambda:num_iter:));
/**
 * Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same
 * guide then use FastBilateralSolverFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
 * @param dst destination image.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 * @param lambda smoothness strength parameter for solver.
 *
 * The omitted num_iter (solver iterations) and max_tol (convergence tolerance) parameters default
 * to 25 and 1e-5 respectively.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 *
 * NOTE: Confidence images with CV_8U depth are expected to be in [0, 255] and CV_32F in [0, 1] range.
 */
+ (void)fastBilateralSolverFilter:(Mat*)guide src:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma lambda:(double)lambda NS_SWIFT_NAME(fastBilateralSolverFilter(guide:src:confidence:dst:sigma_spatial:sigma_luma:sigma_chroma:lambda:));
/**
 * Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same
 * guide then use FastBilateralSolverFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
 * @param dst destination image.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 * @param sigma_chroma parameter, that is similar to chroma space sigma (bandwidth) in bilateralFilter.
 *
 * The omitted lambda (smoothness strength), num_iter (solver iterations) and max_tol (convergence
 * tolerance) parameters default to 128.0, 25 and 1e-5 respectively.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 *
 * NOTE: Confidence images with CV_8U depth are expected to be in [0, 255] and CV_32F in [0, 1] range.
 */
+ (void)fastBilateralSolverFilter:(Mat*)guide src:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma sigma_chroma:(double)sigma_chroma NS_SWIFT_NAME(fastBilateralSolverFilter(guide:src:confidence:dst:sigma_spatial:sigma_luma:sigma_chroma:));
/**
 * Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same
 * guide then use FastBilateralSolverFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
 * @param dst destination image.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 * @param sigma_luma parameter, that is similar to luma space sigma (bandwidth) in bilateralFilter.
 *
 * The omitted sigma_chroma (chroma bandwidth), lambda (smoothness strength), num_iter (solver
 * iterations) and max_tol (convergence tolerance) parameters default to 8, 128.0, 25 and 1e-5
 * respectively.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 *
 * NOTE: Confidence images with CV_8U depth are expected to be in [0, 255] and CV_32F in [0, 1] range.
 */
+ (void)fastBilateralSolverFilter:(Mat*)guide src:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst sigma_spatial:(double)sigma_spatial sigma_luma:(double)sigma_luma NS_SWIFT_NAME(fastBilateralSolverFilter(guide:src:confidence:dst:sigma_spatial:sigma_luma:));
/**
 * Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same
 * guide then use FastBilateralSolverFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
 * @param dst destination image.
 * @param sigma_spatial parameter, that is similar to spatial space sigma (bandwidth) in bilateralFilter.
 *
 * The omitted sigma_luma and sigma_chroma (luma/chroma bandwidths), lambda (smoothness strength),
 * num_iter (solver iterations) and max_tol (convergence tolerance) parameters default to 8, 8,
 * 128.0, 25 and 1e-5 respectively.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 *
 * NOTE: Confidence images with CV_8U depth are expected to be in [0, 255] and CV_32F in [0, 1] range.
 */
+ (void)fastBilateralSolverFilter:(Mat*)guide src:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst sigma_spatial:(double)sigma_spatial NS_SWIFT_NAME(fastBilateralSolverFilter(guide:src:confidence:dst:sigma_spatial:));
/**
 * Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same
 * guide then use FastBilateralSolverFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param confidence confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
 * @param dst destination image.
 *
 * The omitted sigma_spatial, sigma_luma and sigma_chroma (bilateral bandwidths), lambda (smoothness
 * strength), num_iter (solver iterations) and max_tol (convergence tolerance) parameters default to
 * 8, 8, 8, 128.0, 25 and 1e-5 respectively.
 *
 * For more details about the Fast Bilateral Solver parameters, see the original paper CITE: BarronPoole2016.
 *
 * NOTE: Confidence images with CV_8U depth are expected to be in [0, 255] and CV_32F in [0, 1] range.
 */
+ (void)fastBilateralSolverFilter:(Mat*)guide src:(Mat*)src confidence:(Mat*)confidence dst:(Mat*)dst NS_SWIFT_NAME(fastBilateralSolverFilter(guide:src:confidence:dst:));
  1326. //
  1327. // Ptr_FastGlobalSmootherFilter cv::ximgproc::createFastGlobalSmootherFilter(Mat guide, double lambda, double sigma_color, double lambda_attenuation = 0.25, int num_iter = 3)
  1328. //
/**
 * Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param lambda parameter defining the amount of regularization
 * @param sigma_color parameter, that is similar to color space sigma in bilateralFilter.
 * @param lambda_attenuation internal parameter, defining how much lambda decreases after each iteration. Normally,
 * it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
 * @param num_iter number of iterations used for filtering, 3 is usually enough.
 *
 * For more details about Fast Global Smoother parameters, see the original paper CITE: Min2014. However, please note that
 * there are several differences. Lambda attenuation described in the paper is implemented a bit differently so do not
 * expect the results to be identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to
 * achieve the same effect. Also, in case of image filtering where source and guide image are the same, authors
 * propose to dynamically update the guide image after each iteration. To maximize the performance this feature
 * was not implemented here.
 */
+ (FastGlobalSmootherFilter*)createFastGlobalSmootherFilter:(Mat*)guide lambda:(double)lambda sigma_color:(double)sigma_color lambda_attenuation:(double)lambda_attenuation num_iter:(int)num_iter NS_SWIFT_NAME(createFastGlobalSmootherFilter(guide:lambda:sigma_color:lambda_attenuation:num_iter:));
/**
 * Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param lambda parameter defining the amount of regularization
 * @param sigma_color parameter, that is similar to color space sigma in bilateralFilter.
 * @param lambda_attenuation internal parameter, defining how much lambda decreases after each iteration. Normally,
 * it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
 *
 * The omitted num_iter parameter (number of filtering iterations) defaults to 3.
 *
 * For more details about Fast Global Smoother parameters, see the original paper CITE: Min2014. However, please note that
 * there are several differences. Lambda attenuation described in the paper is implemented a bit differently so do not
 * expect the results to be identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to
 * achieve the same effect. Also, in case of image filtering where source and guide image are the same, authors
 * propose to dynamically update the guide image after each iteration. To maximize the performance this feature
 * was not implemented here.
 */
+ (FastGlobalSmootherFilter*)createFastGlobalSmootherFilter:(Mat*)guide lambda:(double)lambda sigma_color:(double)sigma_color lambda_attenuation:(double)lambda_attenuation NS_SWIFT_NAME(createFastGlobalSmootherFilter(guide:lambda:sigma_color:lambda_attenuation:));
/**
 * Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param lambda parameter defining the amount of regularization
 * @param sigma_color parameter, that is similar to color space sigma in bilateralFilter.
 *
 * The omitted lambda_attenuation parameter (how much lambda decreases after each iteration;
 * setting it to 1.0 may lead to streaking artifacts) defaults to 0.25, and the omitted num_iter
 * parameter (number of filtering iterations) defaults to 3.
 *
 * For more details about Fast Global Smoother parameters, see the original paper CITE: Min2014. However, please note that
 * there are several differences. Lambda attenuation described in the paper is implemented a bit differently so do not
 * expect the results to be identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to
 * achieve the same effect. Also, in case of image filtering where source and guide image are the same, authors
 * propose to dynamically update the guide image after each iteration. To maximize the performance this feature
 * was not implemented here.
 */
+ (FastGlobalSmootherFilter*)createFastGlobalSmootherFilter:(Mat*)guide lambda:(double)lambda sigma_color:(double)sigma_color NS_SWIFT_NAME(createFastGlobalSmootherFilter(guide:lambda:sigma_color:));
  1392. //
  1393. // void cv::ximgproc::fastGlobalSmootherFilter(Mat guide, Mat src, Mat& dst, double lambda, double sigma_color, double lambda_attenuation = 0.25, int num_iter = 3)
  1394. //
/**
 * Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same
 * guide then use FastGlobalSmootherFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param dst destination image.
 * @param lambda parameter defining the amount of regularization
 * @param sigma_color parameter, that is similar to color space sigma in bilateralFilter.
 * @param lambda_attenuation internal parameter, defining how much lambda decreases after each iteration. Normally,
 * it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
 * @param num_iter number of iterations used for filtering, 3 is usually enough.
 */
+ (void)fastGlobalSmootherFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst lambda:(double)lambda sigma_color:(double)sigma_color lambda_attenuation:(double)lambda_attenuation num_iter:(int)num_iter NS_SWIFT_NAME(fastGlobalSmootherFilter(guide:src:dst:lambda:sigma_color:lambda_attenuation:num_iter:));
/**
 * Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same
 * guide then use FastGlobalSmootherFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param dst destination image.
 * @param lambda parameter defining the amount of regularization
 * @param sigma_color parameter, that is similar to color space sigma in bilateralFilter.
 * @param lambda_attenuation internal parameter, defining how much lambda decreases after each iteration. Normally,
 * it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
 *
 * The omitted num_iter parameter (number of filtering iterations) defaults to 3.
 */
+ (void)fastGlobalSmootherFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst lambda:(double)lambda sigma_color:(double)sigma_color lambda_attenuation:(double)lambda_attenuation NS_SWIFT_NAME(fastGlobalSmootherFilter(guide:src:dst:lambda:sigma_color:lambda_attenuation:));
/**
 * Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same
 * guide then use FastGlobalSmootherFilter interface to avoid extra computations.
 *
 * @param guide image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
 * @param dst destination image.
 * @param lambda parameter defining the amount of regularization
 * @param sigma_color parameter, that is similar to color space sigma in bilateralFilter.
 *
 * The omitted lambda_attenuation parameter (how much lambda decreases after each iteration;
 * setting it to 1.0 may lead to streaking artifacts) defaults to 0.25, and the omitted num_iter
 * parameter (number of filtering iterations) defaults to 3.
 */
+ (void)fastGlobalSmootherFilter:(Mat*)guide src:(Mat*)src dst:(Mat*)dst lambda:(double)lambda sigma_color:(double)sigma_color NS_SWIFT_NAME(fastGlobalSmootherFilter(guide:src:dst:lambda:sigma_color:));
  1452. //
  1453. // void cv::ximgproc::l0Smooth(Mat src, Mat& dst, double lambda = 0.02, double kappa = 2.0)
  1454. //
/**
 * Global image smoothing via L0 gradient minimization.
 *
 * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
 * @param dst destination image.
 * @param lambda parameter defining the smooth term weight.
 * @param kappa parameter defining the increasing factor of the weight of the gradient data term.
 *
 * For more details about L0 Smoother, see the original paper CITE: xu2011image.
 */
+ (void)l0Smooth:(Mat*)src dst:(Mat*)dst lambda:(double)lambda kappa:(double)kappa NS_SWIFT_NAME(l0Smooth(src:dst:lambda:kappa:));
  1469. /**
  1470. * Global image smoothing via L0 gradient minimization.
  1471. *
  1472. * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
  1473. *
  1474. * @param dst destination image.
  1475. *
  1476. * @param lambda parameter defining the smooth term weight.
  1477. *
  1478. *
  1479. * For more details about L0 Smoother, see the original paper CITE: xu2011image.
  1480. */
  1481. + (void)l0Smooth:(Mat*)src dst:(Mat*)dst lambda:(double)lambda NS_SWIFT_NAME(l0Smooth(src:dst:lambda:));
  1482. /**
  1483. * Global image smoothing via L0 gradient minimization.
  1484. *
  1485. * @param src source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
  1486. *
  1487. * @param dst destination image.
  1488. *
  1489. *
  1490. *
  1491. * For more details about L0 Smoother, see the original paper CITE: xu2011image.
  1492. */
  1493. + (void)l0Smooth:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(l0Smooth(src:dst:));
  1494. //
  1495. // Ptr_SuperpixelSLIC cv::ximgproc::createSuperpixelSLIC(Mat image, SLICType algorithm = SLICO, int region_size = 10, float ruler = 10.0f)
  1496. //
  1497. /**
  1498. * Initialize a SuperpixelSLIC object
  1499. *
  1500. * @param image Image to segment
  1501. * @param algorithm Chooses the algorithm variant to use:
  1502. * SLIC segments image using a desired region_size, and in addition SLICO will optimize using adaptive compactness factor,
  1503. * while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.
  1504. * @param region_size Chooses an average superpixel size measured in pixels
  1505. * @param ruler Chooses the enforcement of superpixel smoothness factor of superpixel
  1506. *
  1507. * The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
  1508. * superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
  1509. * computing iterations over the given image. For enhanced results it is recommended for color images to
  1510. * preprocess image with little gaussian blur using a small 3 x 3 kernel and additional conversion into
  1511. * CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
  1512. *
  1513. * ![image](pics/superpixels_slic.png)
  1514. */
  1515. + (SuperpixelSLIC*)createSuperpixelSLIC:(Mat*)image algorithm:(SLICType)algorithm region_size:(int)region_size ruler:(float)ruler NS_SWIFT_NAME(createSuperpixelSLIC(image:algorithm:region_size:ruler:));
  1516. /**
  1517. * Initialize a SuperpixelSLIC object
  1518. *
  1519. * @param image Image to segment
  1520. * @param algorithm Chooses the algorithm variant to use:
  1521. * SLIC segments image using a desired region_size, and in addition SLICO will optimize using adaptive compactness factor,
  1522. * while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.
  1523. * @param region_size Chooses an average superpixel size measured in pixels
  1524. * The ruler parameter is omitted and takes its default value (10.0f, see the C++ signature above).
  1525. * The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
  1526. * superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
  1527. * computing iterations over the given image. For enhanced results it is recommended for color images to
  1528. * preprocess image with little gaussian blur using a small 3 x 3 kernel and additional conversion into
  1529. * CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
  1530. *
  1531. * ![image](pics/superpixels_slic.png)
  1532. */
  1533. + (SuperpixelSLIC*)createSuperpixelSLIC:(Mat*)image algorithm:(SLICType)algorithm region_size:(int)region_size NS_SWIFT_NAME(createSuperpixelSLIC(image:algorithm:region_size:));
  1534. /**
  1535. * Initialize a SuperpixelSLIC object
  1536. *
  1537. * @param image Image to segment
  1538. * @param algorithm Chooses the algorithm variant to use:
  1539. * SLIC segments image using a desired region_size, and in addition SLICO will optimize using adaptive compactness factor,
  1540. * while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.
  1541. * The region_size and ruler parameters are omitted and take their default values (10 and 10.0f, see the C++ signature above).
  1542. * The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
  1543. * superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
  1544. * computing iterations over the given image. For enhanced results it is recommended for color images to
  1545. * preprocess image with little gaussian blur using a small 3 x 3 kernel and additional conversion into
  1546. * CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
  1547. *
  1548. * ![image](pics/superpixels_slic.png)
  1549. */
  1550. + (SuperpixelSLIC*)createSuperpixelSLIC:(Mat*)image algorithm:(SLICType)algorithm NS_SWIFT_NAME(createSuperpixelSLIC(image:algorithm:));
  1551. /**
  1552. * Initialize a SuperpixelSLIC object
  1553. *
  1554. * @param image Image to segment
  1555. * The algorithm parameter is omitted and defaults to SLICO. SLIC segments image using a desired region_size, and in addition SLICO will optimize using adaptive compactness factor,
  1556. * while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels.
  1557. *
  1558. * The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen
  1559. * superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future
  1560. * computing iterations over the given image. For enhanced results it is recommended for color images to
  1561. * preprocess image with little gaussian blur using a small 3 x 3 kernel and additional conversion into
  1562. * CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.
  1563. *
  1564. * ![image](pics/superpixels_slic.png)
  1565. */
  1566. + (SuperpixelSLIC*)createSuperpixelSLIC:(Mat*)image NS_SWIFT_NAME(createSuperpixelSLIC(image:));
  1567. //
  1568. // void cv::ximgproc::createQuaternionImage(Mat img, Mat& qimg)
  1569. //
  1570. /**
  1571. * creates a quaternion image.
  1572. * @param img input image. @param qimg output quaternion image — NOTE(review): channel count/depth not visible here; confirm against the C++ docs.
  1573. */
  1574. + (void)createQuaternionImage:(Mat*)img qimg:(Mat*)qimg NS_SWIFT_NAME(createQuaternionImage(img:qimg:));
  1575. //
  1576. // void cv::ximgproc::qconj(Mat qimg, Mat& qcimg)
  1577. //
  1578. /**
  1579. * calculates conjugate of a quaternion image.
  1580. * @param qimg input quaternion image. @param qcimg output conjugated quaternion image.
  1581. */
  1582. + (void)qconj:(Mat*)qimg qcimg:(Mat*)qcimg NS_SWIFT_NAME(qconj(qimg:qcimg:));
  1583. //
  1584. // void cv::ximgproc::qunitary(Mat qimg, Mat& qnimg)
  1585. //
  1586. /**
  1587. * divides each element by its modulus.
  1588. * @param qimg input quaternion image. @param qnimg output image where each element has been divided by its modulus.
  1589. */
  1590. + (void)qunitary:(Mat*)qimg qnimg:(Mat*)qnimg NS_SWIFT_NAME(qunitary(qimg:qnimg:));
  1591. //
  1592. // void cv::ximgproc::qmultiply(Mat src1, Mat src2, Mat& dst)
  1593. //
  1594. /**
  1595. * Calculates the per-element quaternion product of two arrays
  1596. * @param src1 first input array. @param src2 second input array. @param dst output array of per-element quaternion products.
  1597. */
  1598. + (void)qmultiply:(Mat*)src1 src2:(Mat*)src2 dst:(Mat*)dst NS_SWIFT_NAME(qmultiply(src1:src2:dst:));
  1599. //
  1600. // void cv::ximgproc::qdft(Mat img, Mat& qimg, int flags, bool sideLeft)
  1601. //
  1602. /**
  1603. * Performs a forward or inverse Discrete quaternion Fourier transform of a 2D quaternion array.
  1604. * @param img input array. @param qimg output array. @param flags transformation flags. @param sideLeft side of the quaternion multiplication — NOTE(review): exact semantics not visible here; confirm against the C++ docs.
  1605. */
  1606. + (void)qdft:(Mat*)img qimg:(Mat*)qimg flags:(int)flags sideLeft:(BOOL)sideLeft NS_SWIFT_NAME(qdft(img:qimg:flags:sideLeft:));
  1607. //
  1608. // void cv::ximgproc::colorMatchTemplate(Mat img, Mat templ, Mat& result)
  1609. //
  1610. /**
  1611. * Compares a color template against overlapped color image regions.
  1612. * @param img image where the search is running. @param templ searched template. @param result map of comparison results.
  1613. */
  1614. + (void)colorMatchTemplate:(Mat*)img templ:(Mat*)templ result:(Mat*)result NS_SWIFT_NAME(colorMatchTemplate(img:templ:result:));
  1615. //
  1616. // Ptr_RFFeatureGetter cv::ximgproc::createRFFeatureGetter()
  1617. //
/**
 * Creates a RFFeatureGetter (the feature extractor type accepted by createStructuredEdgeDetection below).
 */
  1618. + (RFFeatureGetter*)createRFFeatureGetter NS_SWIFT_NAME(createRFFeatureGetter());
  1619. //
  1620. // Ptr_StructuredEdgeDetection cv::ximgproc::createStructuredEdgeDetection(String model, Ptr_RFFeatureGetter howToGetFeatures = Ptr<RFFeatureGetter>())
  1621. //
/**
 * Creates a StructuredEdgeDetection.
 * @param model the model to load — NOTE(review): presumably a path/name of a trained model file; confirm against the C++ docs.
 * @param howToGetFeatures custom feature extractor to use.
 */
  1622. + (StructuredEdgeDetection*)createStructuredEdgeDetection:(NSString*)model howToGetFeatures:(RFFeatureGetter*)howToGetFeatures NS_SWIFT_NAME(createStructuredEdgeDetection(model:howToGetFeatures:));
/**
 * Creates a StructuredEdgeDetection using the default feature extractor (howToGetFeatures defaults to an empty Ptr, see the C++ signature above).
 * @param model the model to load — NOTE(review): presumably a path/name of a trained model file; confirm against the C++ docs.
 */
  1623. + (StructuredEdgeDetection*)createStructuredEdgeDetection:(NSString*)model NS_SWIFT_NAME(createStructuredEdgeDetection(model:));
  1624. //
  1625. // void cv::ximgproc::findEllipses(Mat image, Mat& ellipses, float scoreThreshold = 0.7f, float reliabilityThreshold = 0.5f, float centerDistanceThreshold = 0.05f)
  1626. //
  1627. /**
  1628. * Finds ellipses rapidly in an image using projective invariant pruning.
  1629. *
  1630. * The function detects ellipses in images using projective invariant pruning.
  1631. * For more details about this implementation, please see CITE: jia2017fast
  1632. * Jia, Qi et al, (2017).
  1633. * A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.
  1634. *
  1635. * @param image input image, could be gray or color.
  1636. * @param ellipses output vector of found ellipses. each vector is encoded as floats $x, y, a, b, radius, score$.
  1637. * @param scoreThreshold float, the threshold of ellipse score.
  1638. * @param reliabilityThreshold float, the threshold of reliability.
  1639. * @param centerDistanceThreshold float, the threshold of center distance.
  1640. */
  1641. + (void)findEllipses:(Mat*)image ellipses:(Mat*)ellipses scoreThreshold:(float)scoreThreshold reliabilityThreshold:(float)reliabilityThreshold centerDistanceThreshold:(float)centerDistanceThreshold NS_SWIFT_NAME(findEllipses(image:ellipses:scoreThreshold:reliabilityThreshold:centerDistanceThreshold:));
  1642. /**
  1643. * Finds ellipses rapidly in an image using projective invariant pruning.
  1644. *
  1645. * The function detects ellipses in images using projective invariant pruning.
  1646. * For more details about this implementation, please see CITE: jia2017fast
  1647. * Jia, Qi et al, (2017).
  1648. * A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.
  1649. *
  1650. * @param image input image, could be gray or color.
  1651. * @param ellipses output vector of found ellipses. each vector is encoded as floats $x, y, a, b, radius, score$.
  1652. * @param scoreThreshold float, the threshold of ellipse score.
  1653. * @param reliabilityThreshold float, the threshold of reliability.
 * The centerDistanceThreshold parameter is omitted and takes its default value (0.05f, see the C++ signature above).
  1654. */
  1655. + (void)findEllipses:(Mat*)image ellipses:(Mat*)ellipses scoreThreshold:(float)scoreThreshold reliabilityThreshold:(float)reliabilityThreshold NS_SWIFT_NAME(findEllipses(image:ellipses:scoreThreshold:reliabilityThreshold:));
  1656. /**
  1657. * Finds ellipses rapidly in an image using projective invariant pruning.
  1658. *
  1659. * The function detects ellipses in images using projective invariant pruning.
  1660. * For more details about this implementation, please see CITE: jia2017fast
  1661. * Jia, Qi et al, (2017).
  1662. * A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.
  1663. *
  1664. * @param image input image, could be gray or color.
  1665. * @param ellipses output vector of found ellipses. each vector is encoded as floats $x, y, a, b, radius, score$.
  1666. * @param scoreThreshold float, the threshold of ellipse score.
 * The reliabilityThreshold and centerDistanceThreshold parameters are omitted and take their default values (0.5f and 0.05f).
  1667. */
  1668. + (void)findEllipses:(Mat*)image ellipses:(Mat*)ellipses scoreThreshold:(float)scoreThreshold NS_SWIFT_NAME(findEllipses(image:ellipses:scoreThreshold:));
  1669. /**
  1670. * Finds ellipses rapidly in an image using projective invariant pruning.
  1671. *
  1672. * The function detects ellipses in images using projective invariant pruning.
  1673. * For more details about this implementation, please see CITE: jia2017fast
  1674. * Jia, Qi et al, (2017).
  1675. * A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.
  1676. *
  1677. * @param image input image, could be gray or color.
  1678. * @param ellipses output vector of found ellipses. each vector is encoded as floats $x, y, a, b, radius, score$.
 * The scoreThreshold, reliabilityThreshold and centerDistanceThreshold parameters take their default values (0.7f, 0.5f, 0.05f).
  1679. */
  1680. + (void)findEllipses:(Mat*)image ellipses:(Mat*)ellipses NS_SWIFT_NAME(findEllipses(image:ellipses:));
  1681. //
  1682. // Ptr_SuperpixelLSC cv::ximgproc::createSuperpixelLSC(Mat image, int region_size = 10, float ratio = 0.075f)
  1683. //
  1684. /**
  1685. * Class implementing the LSC (Linear Spectral Clustering) superpixels
  1686. *
  1687. * @param image Image to segment
  1688. * @param region_size Chooses an average superpixel size measured in pixels
  1689. * @param ratio Chooses the enforcement of superpixel compactness factor of superpixel
  1690. *
  1691. * The function initializes a SuperpixelLSC object for the input image. It sets the parameters of
  1692. * superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future
  1693. * computing iterations over the given image. An example of LSC is illustrated in the following picture.
  1694. * For enhanced results it is recommended for color images to preprocess image with little gaussian blur
  1695. * with a small 3 x 3 kernel and additional conversion into CieLAB color space.
  1696. *
  1697. * ![image](pics/superpixels_lsc.png)
  1698. */
  1699. + (SuperpixelLSC*)createSuperpixelLSC:(Mat*)image region_size:(int)region_size ratio:(float)ratio NS_SWIFT_NAME(createSuperpixelLSC(image:region_size:ratio:));
  1700. /**
  1701. * Class implementing the LSC (Linear Spectral Clustering) superpixels
  1702. *
  1703. * @param image Image to segment
  1704. * @param region_size Chooses an average superpixel size measured in pixels
  1705. * The ratio parameter is omitted and takes its default value (0.075f, see the C++ signature above).
  1706. * The function initializes a SuperpixelLSC object for the input image. It sets the parameters of
  1707. * superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future
  1708. * computing iterations over the given image. An example of LSC is illustrated in the following picture.
  1709. * For enhanced results it is recommended for color images to preprocess image with little gaussian blur
  1710. * with a small 3 x 3 kernel and additional conversion into CieLAB color space.
  1711. *
  1712. * ![image](pics/superpixels_lsc.png)
  1713. */
  1714. + (SuperpixelLSC*)createSuperpixelLSC:(Mat*)image region_size:(int)region_size NS_SWIFT_NAME(createSuperpixelLSC(image:region_size:));
  1715. /**
  1716. * Class implementing the LSC (Linear Spectral Clustering) superpixels
  1717. *
  1718. * @param image Image to segment
  1719. * The region_size and ratio parameters are omitted and take their default values (10 and 0.075f, see the C++ signature above).
  1720. * The function initializes a SuperpixelLSC object for the input image. It sets the parameters of
  1721. * superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future
  1722. * computing iterations over the given image. An example of LSC is illustrated in the following picture.
  1723. * For enhanced results it is recommended for color images to preprocess image with little gaussian blur
  1724. * with a small 3 x 3 kernel and additional conversion into CieLAB color space.
  1725. *
  1726. * ![image](pics/superpixels_lsc.png)
  1727. */
  1728. + (SuperpixelLSC*)createSuperpixelLSC:(Mat*)image NS_SWIFT_NAME(createSuperpixelLSC(image:));
  1729. //
  1730. // Ptr_EdgeBoxes cv::ximgproc::createEdgeBoxes(float alpha = 0.65f, float beta = 0.75f, float eta = 1, float minScore = 0.01f, int maxBoxes = 10000, float edgeMinMag = 0.1f, float edgeMergeThr = 0.5f, float clusterMinMag = 0.5f, float maxAspectRatio = 3, float minBoxArea = 1000, float gamma = 2, float kappa = 1.5f)
  1731. //
  1732. /**
  1733. * Creates an EdgeBoxes object
  1734. *
  1735. * @param alpha step size of sliding window search.
  1736. * @param beta nms threshold for object proposals.
  1737. * @param eta adaptation rate for nms threshold.
  1738. * @param minScore min score of boxes to detect.
  1739. * @param maxBoxes max number of boxes to detect.
  1740. * @param edgeMinMag edge min magnitude. Increase to trade off accuracy for speed.
  1741. * @param edgeMergeThr edge merge threshold. Increase to trade off accuracy for speed.
  1742. * @param clusterMinMag cluster min magnitude. Increase to trade off accuracy for speed.
  1743. * @param maxAspectRatio max aspect ratio of boxes.
  1744. * @param minBoxArea minimum area of boxes.
  1745. * @param gamma affinity sensitivity.
  1746. * @param kappa scale sensitivity.
  1747. */
  1748. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes edgeMinMag:(float)edgeMinMag edgeMergeThr:(float)edgeMergeThr clusterMinMag:(float)clusterMinMag maxAspectRatio:(float)maxAspectRatio minBoxArea:(float)minBoxArea gamma:(float)gamma kappa:(float)kappa NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:edgeMinMag:edgeMergeThr:clusterMinMag:maxAspectRatio:minBoxArea:gamma:kappa:));
  1749. /**
  1750. * Creates an EdgeBoxes object
  1751. *
  1752. * @param alpha step size of sliding window search.
  1753. * @param beta nms threshold for object proposals.
  1754. * @param eta adaptation rate for nms threshold.
  1755. * @param minScore min score of boxes to detect.
  1756. * @param maxBoxes max number of boxes to detect.
  1757. * @param edgeMinMag edge min magnitude. Increase to trade off accuracy for speed.
  1758. * @param edgeMergeThr edge merge threshold. Increase to trade off accuracy for speed.
  1759. * @param clusterMinMag cluster min magnitude. Increase to trade off accuracy for speed.
  1760. * @param maxAspectRatio max aspect ratio of boxes.
  1761. * @param minBoxArea minimum area of boxes.
  1762. * @param gamma affinity sensitivity.
  1763. */
  1764. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes edgeMinMag:(float)edgeMinMag edgeMergeThr:(float)edgeMergeThr clusterMinMag:(float)clusterMinMag maxAspectRatio:(float)maxAspectRatio minBoxArea:(float)minBoxArea gamma:(float)gamma NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:edgeMinMag:edgeMergeThr:clusterMinMag:maxAspectRatio:minBoxArea:gamma:));
  1765. /**
  1766. * Creates an EdgeBoxes object
  1767. *
  1768. * @param alpha step size of sliding window search.
  1769. * @param beta nms threshold for object proposals.
  1770. * @param eta adaptation rate for nms threshold.
  1771. * @param minScore min score of boxes to detect.
  1772. * @param maxBoxes max number of boxes to detect.
  1773. * @param edgeMinMag edge min magnitude. Increase to trade off accuracy for speed.
  1774. * @param edgeMergeThr edge merge threshold. Increase to trade off accuracy for speed.
  1775. * @param clusterMinMag cluster min magnitude. Increase to trade off accuracy for speed.
  1776. * @param maxAspectRatio max aspect ratio of boxes.
  1777. * @param minBoxArea minimum area of boxes.
  1778. */
  1779. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes edgeMinMag:(float)edgeMinMag edgeMergeThr:(float)edgeMergeThr clusterMinMag:(float)clusterMinMag maxAspectRatio:(float)maxAspectRatio minBoxArea:(float)minBoxArea NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:edgeMinMag:edgeMergeThr:clusterMinMag:maxAspectRatio:minBoxArea:));
  1780. /**
  1781. * Creates an EdgeBoxes object
  1782. *
  1783. * @param alpha step size of sliding window search.
  1784. * @param beta nms threshold for object proposals.
  1785. * @param eta adaptation rate for nms threshold.
  1786. * @param minScore min score of boxes to detect.
  1787. * @param maxBoxes max number of boxes to detect.
  1788. * @param edgeMinMag edge min magnitude. Increase to trade off accuracy for speed.
  1789. * @param edgeMergeThr edge merge threshold. Increase to trade off accuracy for speed.
  1790. * @param clusterMinMag cluster min magnitude. Increase to trade off accuracy for speed.
  1791. * @param maxAspectRatio max aspect ratio of boxes.
  1792. */
  1793. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes edgeMinMag:(float)edgeMinMag edgeMergeThr:(float)edgeMergeThr clusterMinMag:(float)clusterMinMag maxAspectRatio:(float)maxAspectRatio NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:edgeMinMag:edgeMergeThr:clusterMinMag:maxAspectRatio:));
  1794. /**
  1795. * Creates an EdgeBoxes object
  1796. *
  1797. * @param alpha step size of sliding window search.
  1798. * @param beta nms threshold for object proposals.
  1799. * @param eta adaptation rate for nms threshold.
  1800. * @param minScore min score of boxes to detect.
  1801. * @param maxBoxes max number of boxes to detect.
  1802. * @param edgeMinMag edge min magnitude. Increase to trade off accuracy for speed.
  1803. * @param edgeMergeThr edge merge threshold. Increase to trade off accuracy for speed.
  1804. * @param clusterMinMag cluster min magnitude. Increase to trade off accuracy for speed.
  1805. */
  1806. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes edgeMinMag:(float)edgeMinMag edgeMergeThr:(float)edgeMergeThr clusterMinMag:(float)clusterMinMag NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:edgeMinMag:edgeMergeThr:clusterMinMag:));
  1807. /**
  1808. * Creates an EdgeBoxes object
  1809. *
  1810. * @param alpha step size of sliding window search.
  1811. * @param beta nms threshold for object proposals.
  1812. * @param eta adaptation rate for nms threshold.
  1813. * @param minScore min score of boxes to detect.
  1814. * @param maxBoxes max number of boxes to detect.
  1815. * @param edgeMinMag edge min magnitude. Increase to trade off accuracy for speed.
  1816. * @param edgeMergeThr edge merge threshold. Increase to trade off accuracy for speed.
  1817. */
  1818. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes edgeMinMag:(float)edgeMinMag edgeMergeThr:(float)edgeMergeThr NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:edgeMinMag:edgeMergeThr:));
  1819. /**
  1820. * Creates an EdgeBoxes object
  1821. *
  1822. * @param alpha step size of sliding window search.
  1823. * @param beta nms threshold for object proposals.
  1824. * @param eta adaptation rate for nms threshold.
  1825. * @param minScore min score of boxes to detect.
  1826. * @param maxBoxes max number of boxes to detect.
  1827. * @param edgeMinMag edge min magnitude. Increase to trade off accuracy for speed.
  1828. */
  1829. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes edgeMinMag:(float)edgeMinMag NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:edgeMinMag:));
  1830. /**
  1831. * Creates an EdgeBoxes object
  1832. *
  1833. * @param alpha step size of sliding window search.
  1834. * @param beta nms threshold for object proposals.
  1835. * @param eta adaptation rate for nms threshold.
  1836. * @param minScore min score of boxes to detect.
  1837. * @param maxBoxes max number of boxes to detect.
  1838. */
  1839. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore maxBoxes:(int)maxBoxes NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:maxBoxes:));
  1840. /**
  1841. * Creates an EdgeBoxes object
  1842. *
  1843. * @param alpha step size of sliding window search.
  1844. * @param beta nms threshold for object proposals.
  1845. * @param eta adaptation rate for nms threshold.
  1846. * @param minScore min score of boxes to detect.
  1847. */
  1848. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta minScore:(float)minScore NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:minScore:));
  1849. /**
  1850. * Creates an EdgeBoxes object
  1851. *
  1852. * @param alpha step size of sliding window search.
  1853. * @param beta nms threshold for object proposals.
  1854. * @param eta adaptation rate for nms threshold.
  1855. */
  1856. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta eta:(float)eta NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:eta:));
  1857. /**
  1858. * Creates an EdgeBoxes object
  1859. *
  1860. * @param alpha step size of sliding window search.
  1861. * @param beta nms threshold for object proposals.
  1862. */
  1863. + (EdgeBoxes*)createEdgeBoxes:(float)alpha beta:(float)beta NS_SWIFT_NAME(createEdgeBoxes(alpha:beta:));
  1864. /**
  1865. * Creates an EdgeBoxes object
  1866. *
  1867. * @param alpha step size of sliding window search.
  1868. */
  1869. + (EdgeBoxes*)createEdgeBoxes:(float)alpha NS_SWIFT_NAME(createEdgeBoxes(alpha:));
  1870. /**
  1871. * Creates an EdgeBoxes object with all parameters at their default values (see the C++ signature above).
  1872. *
  1873. */
  1874. + (EdgeBoxes*)createEdgeBoxes NS_SWIFT_NAME(createEdgeBoxes());
  1875. //
  1876. // void cv::ximgproc::weightedMedianFilter(Mat joint, Mat src, Mat& dst, int r, double sigma = 25.5, WMFWeightType weightType = WMF_EXP, Mat mask = Mat())
  1877. //
  1878. /**
  1879. * Applies weighted median filter to an image.
  1880. *
  1881. * For more details about this implementation, please see CITE: zhang2014100+
  1882. *
  1883. * If an element of the mask is zero, the corresponding pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
  1884. *
  1885. * @see `medianBlur`, `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`
  1886. */
  1887. + (void)weightedMedianFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst r:(int)r sigma:(double)sigma weightType:(WMFWeightType)weightType mask:(Mat*)mask NS_SWIFT_NAME(weightedMedianFilter(joint:src:dst:r:sigma:weightType:mask:));
  1888. /**
  1889. * Applies weighted median filter to an image.
  1890. *
  1891. * For more details about this implementation, please see CITE: zhang2014100+
  1892. *
  1893. * The mask parameter is omitted in this overload (defaults to an empty Mat): when supplied, a zero mask element means the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
  1894. *
  1895. * @see `medianBlur`, `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`
  1896. */
  1897. + (void)weightedMedianFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst r:(int)r sigma:(double)sigma weightType:(WMFWeightType)weightType NS_SWIFT_NAME(weightedMedianFilter(joint:src:dst:r:sigma:weightType:));
  1898. /**
  1899. * Applies weighted median filter to an image.
  1900. *
  1901. * For more details about this implementation, please see CITE: zhang2014100+
  1902. *
  1903. * The weightType and mask parameters are omitted (defaults: WMF_EXP and an empty Mat). When a mask is supplied, a zero mask element means the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
  1904. *
  1905. * @see `medianBlur`, `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`
  1906. */
  1907. + (void)weightedMedianFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst r:(int)r sigma:(double)sigma NS_SWIFT_NAME(weightedMedianFilter(joint:src:dst:r:sigma:));
  1908. /**
  1909. * Applies weighted median filter to an image.
  1910. *
  1911. * For more details about this implementation, please see CITE: zhang2014100+
  1912. *
  1913. * The sigma, weightType and mask parameters are omitted (defaults: 25.5, WMF_EXP and an empty Mat). When a mask is supplied, a zero mask element means the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
  1914. *
  1915. * @see `medianBlur`, `+jointBilateralFilter:src:dst:d:sigmaColor:sigmaSpace:borderType:`
  1916. */
  1917. + (void)weightedMedianFilter:(Mat*)joint src:(Mat*)src dst:(Mat*)dst r:(int)r NS_SWIFT_NAME(weightedMedianFilter(joint:src:dst:r:));
  1918. //
  1919. // Ptr_GraphSegmentation cv::ximgproc::segmentation::createGraphSegmentation(double sigma = 0.5, float k = 300, int min_size = 100)
  1920. //
  1921. /**
  1922. * Creates a graph based segmentor
  1923. * @param sigma The sigma parameter, used to smooth image
  1924. * @param k The k parameter of the algorithm
  1925. * @param min_size The minimum size of segments
  1926. */
  1927. + (GraphSegmentation*)createGraphSegmentation:(double)sigma k:(float)k min_size:(int)min_size NS_SWIFT_NAME(createGraphSegmentation(sigma:k:min_size:));
  1928. /**
  1929. * Creates a graph based segmentor
  1930. * @param sigma The sigma parameter, used to smooth image
  1931. * @param k The k parameter of the algorithm
 * The min_size parameter is omitted and takes its default value (100, see the C++ signature above).
  1932. */
  1933. + (GraphSegmentation*)createGraphSegmentation:(double)sigma k:(float)k NS_SWIFT_NAME(createGraphSegmentation(sigma:k:));
  1934. /**
  1935. * Creates a graph based segmentor
  1936. * @param sigma The sigma parameter, used to smooth image
 * The k and min_size parameters are omitted and take their default values (300 and 100, see the C++ signature above).
  1937. */
  1938. + (GraphSegmentation*)createGraphSegmentation:(double)sigma NS_SWIFT_NAME(createGraphSegmentation(sigma:));
  1939. /**
  1940. * Creates a graph based segmentor with all parameters at their default values (sigma = 0.5, k = 300, min_size = 100).
  1941. */
  1942. + (GraphSegmentation*)createGraphSegmentation NS_SWIFT_NAME(createGraphSegmentation());
  1943. //
  1944. // Ptr_SelectiveSearchSegmentationStrategyColor cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyColor()
  1945. //
  1946. /**
  1947. * Create a new color-based strategy
  1948. */
  1949. + (SelectiveSearchSegmentationStrategyColor*)createSelectiveSearchSegmentationStrategyColor NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyColor());
  1950. //
  1951. // Ptr_SelectiveSearchSegmentationStrategySize cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategySize()
  1952. //
  1953. /**
  1954. * Create a new size-based strategy
  1955. */
  1956. + (SelectiveSearchSegmentationStrategySize*)createSelectiveSearchSegmentationStrategySize NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategySize());
  1957. //
  1958. // Ptr_SelectiveSearchSegmentationStrategyTexture cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyTexture()
  1959. //
  1960. /**
  1961. * Create a new texture-based strategy
  1962. */
  1963. + (SelectiveSearchSegmentationStrategyTexture*)createSelectiveSearchSegmentationStrategyTexture NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyTexture());
  1964. //
  1965. // Ptr_SelectiveSearchSegmentationStrategyFill cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyFill()
  1966. //
  1967. /**
  1968. * Create a new fill-based strategy
  1969. */
  1970. + (SelectiveSearchSegmentationStrategyFill*)createSelectiveSearchSegmentationStrategyFill NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyFill());
  1971. //
  1972. // Ptr_SelectiveSearchSegmentationStrategyMultiple cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyMultiple()
  1973. //
  1974. /**
  1975. * Create a new multiple strategy
  1976. */
  1977. + (SelectiveSearchSegmentationStrategyMultiple*)createSelectiveSearchSegmentationStrategyMultiple NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyMultiple());
  1978. //
  1979. // Ptr_SelectiveSearchSegmentationStrategyMultiple cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyMultiple(Ptr_SelectiveSearchSegmentationStrategy s1)
  1980. //
  1981. /**
  1982. * Create a new multiple strategy and set one subtrategy
  1983. * @param s1 The first strategy
  1984. */
  1985. + (SelectiveSearchSegmentationStrategyMultiple*)createSelectiveSearchSegmentationStrategyMultiple:(SelectiveSearchSegmentationStrategy*)s1 NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyMultiple(s1:));
  1986. //
  1987. // Ptr_SelectiveSearchSegmentationStrategyMultiple cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyMultiple(Ptr_SelectiveSearchSegmentationStrategy s1, Ptr_SelectiveSearchSegmentationStrategy s2)
  1988. //
/**
 * Create a new multiple strategy and set two sub-strategies, with equal weights
 * @param s1 The first strategy
 * @param s2 The second strategy
 */
+ (SelectiveSearchSegmentationStrategyMultiple*)createSelectiveSearchSegmentationStrategyMultiple:(SelectiveSearchSegmentationStrategy*)s1 s2:(SelectiveSearchSegmentationStrategy*)s2 NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyMultiple(s1:s2:));
  1995. //
  1996. // Ptr_SelectiveSearchSegmentationStrategyMultiple cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyMultiple(Ptr_SelectiveSearchSegmentationStrategy s1, Ptr_SelectiveSearchSegmentationStrategy s2, Ptr_SelectiveSearchSegmentationStrategy s3)
  1997. //
/**
 * Create a new multiple strategy and set three sub-strategies, with equal weights
 * @param s1 The first strategy
 * @param s2 The second strategy
 * @param s3 The third strategy
 */
+ (SelectiveSearchSegmentationStrategyMultiple*)createSelectiveSearchSegmentationStrategyMultiple:(SelectiveSearchSegmentationStrategy*)s1 s2:(SelectiveSearchSegmentationStrategy*)s2 s3:(SelectiveSearchSegmentationStrategy*)s3 NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyMultiple(s1:s2:s3:));
  2005. //
  2006. // Ptr_SelectiveSearchSegmentationStrategyMultiple cv::ximgproc::segmentation::createSelectiveSearchSegmentationStrategyMultiple(Ptr_SelectiveSearchSegmentationStrategy s1, Ptr_SelectiveSearchSegmentationStrategy s2, Ptr_SelectiveSearchSegmentationStrategy s3, Ptr_SelectiveSearchSegmentationStrategy s4)
  2007. //
/**
 * Create a new multiple strategy and set four sub-strategies, with equal weights
 * @param s1 The first strategy
 * @param s2 The second strategy
 * @param s3 The third strategy
 * @param s4 The fourth strategy
 */
+ (SelectiveSearchSegmentationStrategyMultiple*)createSelectiveSearchSegmentationStrategyMultiple:(SelectiveSearchSegmentationStrategy*)s1 s2:(SelectiveSearchSegmentationStrategy*)s2 s3:(SelectiveSearchSegmentationStrategy*)s3 s4:(SelectiveSearchSegmentationStrategy*)s4 NS_SWIFT_NAME(createSelectiveSearchSegmentationStrategyMultiple(s1:s2:s3:s4:));
  2016. //
  2017. // Ptr_SelectiveSearchSegmentation cv::ximgproc::segmentation::createSelectiveSearchSegmentation()
  2018. //
  2019. /**
  2020. * Create a new SelectiveSearchSegmentation class.
  2021. */
  2022. + (SelectiveSearchSegmentation*)createSelectiveSearchSegmentation NS_SWIFT_NAME(createSelectiveSearchSegmentation());
  2023. //
  2024. // void cv::ximgproc::FastHoughTransform(Mat src, Mat& dst, int dstMatDepth, AngleRangeOption angleRange = ARO_315_135, HoughOp op = FHT_ADD, HoughDeskewOption makeSkew = HDO_DESKEW)
  2025. //
  2026. /**
  2027. * Calculates 2D Fast Hough transform of an image.
  2028. *
  2029. * The function calculates the fast Hough transform for full, half or quarter
  2030. * range of angles.
  2031. */
  2032. + (void)FastHoughTransform:(Mat*)src dst:(Mat*)dst dstMatDepth:(int)dstMatDepth angleRange:(AngleRangeOption)angleRange op:(HoughOp)op makeSkew:(HoughDeskewOption)makeSkew NS_SWIFT_NAME(FastHoughTransform(src:dst:dstMatDepth:angleRange:op:makeSkew:));
  2033. /**
  2034. * Calculates 2D Fast Hough transform of an image.
  2035. *
  2036. * The function calculates the fast Hough transform for full, half or quarter
  2037. * range of angles.
  2038. */
  2039. + (void)FastHoughTransform:(Mat*)src dst:(Mat*)dst dstMatDepth:(int)dstMatDepth angleRange:(AngleRangeOption)angleRange op:(HoughOp)op NS_SWIFT_NAME(FastHoughTransform(src:dst:dstMatDepth:angleRange:op:));
  2040. /**
  2041. * Calculates 2D Fast Hough transform of an image.
  2042. *
  2043. * The function calculates the fast Hough transform for full, half or quarter
  2044. * range of angles.
  2045. */
  2046. + (void)FastHoughTransform:(Mat*)src dst:(Mat*)dst dstMatDepth:(int)dstMatDepth angleRange:(AngleRangeOption)angleRange NS_SWIFT_NAME(FastHoughTransform(src:dst:dstMatDepth:angleRange:));
  2047. /**
  2048. * Calculates 2D Fast Hough transform of an image.
  2049. *
  2050. * The function calculates the fast Hough transform for full, half or quarter
  2051. * range of angles.
  2052. */
  2053. + (void)FastHoughTransform:(Mat*)src dst:(Mat*)dst dstMatDepth:(int)dstMatDepth NS_SWIFT_NAME(FastHoughTransform(src:dst:dstMatDepth:));
  2054. //
  2055. // Vec4i cv::ximgproc::HoughPoint2Line(Point houghPoint, Mat srcImgInfo, AngleRangeOption angleRange = ARO_315_135, HoughDeskewOption makeSkew = HDO_DESKEW, int rules = RO_IGNORE_BORDERS)
  2056. //
/**
 * Calculates coordinates of the line segment corresponding to a point in Hough space.
 * @retval [Vec4i] Coordinates of the line segment corresponding to the point in Hough space.
 * @remarks If the rules parameter is set to RO_STRICT
 * then the returned line is cut along the border of the source image.
 * @remarks If the rules parameter is set to RO_WEAK then, in the case of a point which belongs
 * to the incorrect part of the Hough image, the returned line will not intersect the source image.
 *
 * The function calculates coordinates of the line segment corresponding to a point in Hough space.
 */
+ (Int4*)HoughPoint2Line:(Point2i*)houghPoint srcImgInfo:(Mat*)srcImgInfo angleRange:(AngleRangeOption)angleRange makeSkew:(HoughDeskewOption)makeSkew rules:(int)rules NS_SWIFT_NAME(HoughPoint2Line(houghPoint:srcImgInfo:angleRange:makeSkew:rules:));
/**
 * Calculates coordinates of the line segment corresponding to a point in Hough space.
 * @retval [Vec4i] Coordinates of the line segment corresponding to the point in Hough space.
 * @remarks If the rules parameter is set to RO_STRICT
 * then the returned line is cut along the border of the source image.
 * @remarks If the rules parameter is set to RO_WEAK then, in the case of a point which belongs
 * to the incorrect part of the Hough image, the returned line will not intersect the source image.
 *
 * The function calculates coordinates of the line segment corresponding to a point in Hough space.
 */
+ (Int4*)HoughPoint2Line:(Point2i*)houghPoint srcImgInfo:(Mat*)srcImgInfo angleRange:(AngleRangeOption)angleRange makeSkew:(HoughDeskewOption)makeSkew NS_SWIFT_NAME(HoughPoint2Line(houghPoint:srcImgInfo:angleRange:makeSkew:));
/**
 * Calculates coordinates of the line segment corresponding to a point in Hough space.
 * @retval [Vec4i] Coordinates of the line segment corresponding to the point in Hough space.
 * @remarks If the rules parameter is set to RO_STRICT
 * then the returned line is cut along the border of the source image.
 * @remarks If the rules parameter is set to RO_WEAK then, in the case of a point which belongs
 * to the incorrect part of the Hough image, the returned line will not intersect the source image.
 *
 * The function calculates coordinates of the line segment corresponding to a point in Hough space.
 */
+ (Int4*)HoughPoint2Line:(Point2i*)houghPoint srcImgInfo:(Mat*)srcImgInfo angleRange:(AngleRangeOption)angleRange NS_SWIFT_NAME(HoughPoint2Line(houghPoint:srcImgInfo:angleRange:));
/**
 * Calculates coordinates of the line segment corresponding to a point in Hough space.
 * @retval [Vec4i] Coordinates of the line segment corresponding to the point in Hough space.
 * @remarks If the rules parameter is set to RO_STRICT
 * then the returned line is cut along the border of the source image.
 * @remarks If the rules parameter is set to RO_WEAK then, in the case of a point which belongs
 * to the incorrect part of the Hough image, the returned line will not intersect the source image.
 *
 * The function calculates coordinates of the line segment corresponding to a point in Hough space.
 */
+ (Int4*)HoughPoint2Line:(Point2i*)houghPoint srcImgInfo:(Mat*)srcImgInfo NS_SWIFT_NAME(HoughPoint2Line(houghPoint:srcImgInfo:));
  2101. //
  2102. // void cv::ximgproc::PeiLinNormalization(Mat I, Mat& T)
  2103. //
/**
 * Calculates the Pei-Lin normalization transform of an image.
 *
 * @param I Source image.
 * @param T Output matrix receiving the computed normalization transform.
 */
+ (void)PeiLinNormalization:(Mat*)I T:(Mat*)T NS_SWIFT_NAME(PeiLinNormalization(I:T:));
  2105. //
  2106. // void cv::ximgproc::fourierDescriptor(Mat src, Mat& dst, int nbElt = -1, int nbFD = -1)
  2107. //
/**
 * Fourier descriptors for planar closed curves
 *
 * For more details about this implementation, please see CITE: PersoonFu1977
 *
 *
 */
+ (void)fourierDescriptor:(Mat*)src dst:(Mat*)dst nbElt:(int)nbElt nbFD:(int)nbFD NS_SWIFT_NAME(fourierDescriptor(src:dst:nbElt:nbFD:));
/**
 * Fourier descriptors for planar closed curves
 *
 * For more details about this implementation, please see CITE: PersoonFu1977
 *
 *
 */
+ (void)fourierDescriptor:(Mat*)src dst:(Mat*)dst nbElt:(int)nbElt NS_SWIFT_NAME(fourierDescriptor(src:dst:nbElt:));
/**
 * Fourier descriptors for planar closed curves
 *
 * For more details about this implementation, please see CITE: PersoonFu1977
 *
 *
 */
+ (void)fourierDescriptor:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(fourierDescriptor(src:dst:));
  2132. //
  2133. // void cv::ximgproc::transformFD(Mat src, Mat t, Mat& dst, bool fdContour = true)
  2134. //
  2135. /**
  2136. * transform a contour
  2137. *
  2138. *
  2139. */
  2140. + (void)transformFD:(Mat*)src t:(Mat*)t dst:(Mat*)dst fdContour:(BOOL)fdContour NS_SWIFT_NAME(transformFD(src:t:dst:fdContour:));
  2141. /**
  2142. * transform a contour
  2143. *
  2144. *
  2145. */
  2146. + (void)transformFD:(Mat*)src t:(Mat*)t dst:(Mat*)dst NS_SWIFT_NAME(transformFD(src:t:dst:));
  2147. //
  2148. // void cv::ximgproc::contourSampling(Mat src, Mat& out, int nbElt)
  2149. //
  2150. /**
  2151. * Contour sampling .
  2152. *
  2153. *
  2154. */
  2155. + (void)contourSampling:(Mat*)src out:(Mat*)out nbElt:(int)nbElt NS_SWIFT_NAME(contourSampling(src:out:nbElt:));
  2156. //
  2157. // Ptr_ContourFitting cv::ximgproc::createContourFitting(int ctr = 1024, int fd = 16)
  2158. //
/**
 * create ContourFitting algorithm object
 *
 * @param ctr number of contour points after resampling (contours are resampled to this size).
 * @param fd number of Fourier descriptors used for fitting.
 */
+ (ContourFitting*)createContourFitting:(int)ctr fd:(int)fd NS_SWIFT_NAME(createContourFitting(ctr:fd:));
/**
 * create ContourFitting algorithm object
 *
 * @param ctr number of contour points after resampling (contours are resampled to this size).
 */
+ (ContourFitting*)createContourFitting:(int)ctr NS_SWIFT_NAME(createContourFitting(ctr:));
/**
 * create ContourFitting algorithm object
 *
 */
+ (ContourFitting*)createContourFitting NS_SWIFT_NAME(createContourFitting());
  2177. //
  2178. // Ptr_EdgeAwareInterpolator cv::ximgproc::createEdgeAwareInterpolator()
  2179. //
  2180. /**
  2181. * Factory method that creates an instance of the
  2182. * EdgeAwareInterpolator.
  2183. */
  2184. + (EdgeAwareInterpolator*)createEdgeAwareInterpolator NS_SWIFT_NAME(createEdgeAwareInterpolator());
  2185. //
  2186. // Ptr_RICInterpolator cv::ximgproc::createRICInterpolator()
  2187. //
  2188. /**
  2189. * Factory method that creates an instance of the
  2190. * RICInterpolator.
  2191. */
  2192. + (RICInterpolator*)createRICInterpolator NS_SWIFT_NAME(createRICInterpolator());
  2193. //
  2194. // Ptr_EdgeDrawing cv::ximgproc::createEdgeDrawing()
  2195. //
  2196. /**
  2197. * Creates a smart pointer to a EdgeDrawing object and initializes it
  2198. */
  2199. + (EdgeDrawing*)createEdgeDrawing NS_SWIFT_NAME(createEdgeDrawing());
  2200. //
  2201. // void cv::ximgproc::RadonTransform(Mat src, Mat& dst, double theta = 1, double start_angle = 0, double end_angle = 180, bool crop = false, bool norm = false)
  2202. //
/**
 * Calculate Radon Transform of an image.
 *
 * This function calculates the Radon Transform of a given image in any range.
 * See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for detail.
 * If the input type is CV_8U, the output will be CV_32S.
 * If the input type is CV_32F or CV_64F, the output will be CV_64F.
 * The output size will be num_of_integral x src_diagonal_length.
 * If crop is selected, the input image will be cropped into a square and then a circle,
 * and the output size will be num_of_integral x min_edge.
 *
 */
+ (void)RadonTransform:(Mat*)src dst:(Mat*)dst theta:(double)theta start_angle:(double)start_angle end_angle:(double)end_angle crop:(BOOL)crop norm:(BOOL)norm NS_SWIFT_NAME(RadonTransform(src:dst:theta:start_angle:end_angle:crop:norm:));
/**
 * Calculate Radon Transform of an image.
 *
 * This function calculates the Radon Transform of a given image in any range.
 * See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for detail.
 * If the input type is CV_8U, the output will be CV_32S.
 * If the input type is CV_32F or CV_64F, the output will be CV_64F.
 * The output size will be num_of_integral x src_diagonal_length.
 * If crop is selected, the input image will be cropped into a square and then a circle,
 * and the output size will be num_of_integral x min_edge.
 *
 */
+ (void)RadonTransform:(Mat*)src dst:(Mat*)dst theta:(double)theta start_angle:(double)start_angle end_angle:(double)end_angle crop:(BOOL)crop NS_SWIFT_NAME(RadonTransform(src:dst:theta:start_angle:end_angle:crop:));
/**
 * Calculate Radon Transform of an image.
 *
 * This function calculates the Radon Transform of a given image in any range.
 * See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for detail.
 * If the input type is CV_8U, the output will be CV_32S.
 * If the input type is CV_32F or CV_64F, the output will be CV_64F.
 * The output size will be num_of_integral x src_diagonal_length.
 * If crop is selected, the input image will be cropped into a square and then a circle,
 * and the output size will be num_of_integral x min_edge.
 *
 */
+ (void)RadonTransform:(Mat*)src dst:(Mat*)dst theta:(double)theta start_angle:(double)start_angle end_angle:(double)end_angle NS_SWIFT_NAME(RadonTransform(src:dst:theta:start_angle:end_angle:));
/**
 * Calculate Radon Transform of an image.
 *
 * This function calculates the Radon Transform of a given image in any range.
 * See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for detail.
 * If the input type is CV_8U, the output will be CV_32S.
 * If the input type is CV_32F or CV_64F, the output will be CV_64F.
 * The output size will be num_of_integral x src_diagonal_length.
 * If crop is selected, the input image will be cropped into a square and then a circle,
 * and the output size will be num_of_integral x min_edge.
 *
 */
+ (void)RadonTransform:(Mat*)src dst:(Mat*)dst theta:(double)theta start_angle:(double)start_angle NS_SWIFT_NAME(RadonTransform(src:dst:theta:start_angle:));
/**
 * Calculate Radon Transform of an image.
 *
 * This function calculates the Radon Transform of a given image in any range.
 * See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for detail.
 * If the input type is CV_8U, the output will be CV_32S.
 * If the input type is CV_32F or CV_64F, the output will be CV_64F.
 * The output size will be num_of_integral x src_diagonal_length.
 * If crop is selected, the input image will be cropped into a square and then a circle,
 * and the output size will be num_of_integral x min_edge.
 *
 */
+ (void)RadonTransform:(Mat*)src dst:(Mat*)dst theta:(double)theta NS_SWIFT_NAME(RadonTransform(src:dst:theta:));
/**
 * Calculate Radon Transform of an image.
 *
 * This function calculates the Radon Transform of a given image in any range.
 * See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for detail.
 * If the input type is CV_8U, the output will be CV_32S.
 * If the input type is CV_32F or CV_64F, the output will be CV_64F.
 * The output size will be num_of_integral x src_diagonal_length.
 * If crop is selected, the input image will be cropped into a square and then a circle,
 * and the output size will be num_of_integral x min_edge.
 *
 */
+ (void)RadonTransform:(Mat*)src dst:(Mat*)dst NS_SWIFT_NAME(RadonTransform(src:dst:));
  2281. //
  2282. // Ptr_SuperpixelSEEDS cv::ximgproc::createSuperpixelSEEDS(int image_width, int image_height, int image_channels, int num_superpixels, int num_levels, int prior = 2, int histogram_bins = 5, bool double_step = false)
  2283. //
  2284. /**
  2285. * Initializes a SuperpixelSEEDS object.
  2286. *
  2287. * @param image_width Image width.
  2288. * @param image_height Image height.
  2289. * @param image_channels Number of channels of the image.
  2290. * @param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
  2291. * due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
  2292. * get the actual number.
  2293. * @param num_levels Number of block levels. The more levels, the more accurate is the segmentation,
  2294. * but needs more memory and CPU time.
  2295. * @param prior enable 3x3 shape smoothing term if \>0. A larger value leads to smoother shapes. prior
  2296. * must be in the range [0, 5].
  2297. * @param histogram_bins Number of histogram bins.
  2298. * @param double_step If true, iterate each block level twice for higher accuracy.
  2299. *
  2300. * The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
  2301. * the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
  2302. * superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
  2303. * double_step.
  2304. *
  2305. * The number of levels in num_levels defines the amount of block levels that the algorithm use in the
  2306. * optimization. The initialization is a grid, in which the superpixels are equally distributed through
  2307. * the width and the height of the image. The larger blocks correspond to the superpixel size, and the
  2308. * levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
  2309. * recursively until the smaller block level. An example of initialization of 4 block levels is
  2310. * illustrated in the following figure.
  2311. *
  2312. * ![image](pics/superpixels_blocks.png)
  2313. */
  2314. + (SuperpixelSEEDS*)createSuperpixelSEEDS:(int)image_width image_height:(int)image_height image_channels:(int)image_channels num_superpixels:(int)num_superpixels num_levels:(int)num_levels prior:(int)prior histogram_bins:(int)histogram_bins double_step:(BOOL)double_step NS_SWIFT_NAME(createSuperpixelSEEDS(image_width:image_height:image_channels:num_superpixels:num_levels:prior:histogram_bins:double_step:));
  2315. /**
  2316. * Initializes a SuperpixelSEEDS object.
  2317. *
  2318. * @param image_width Image width.
  2319. * @param image_height Image height.
  2320. * @param image_channels Number of channels of the image.
  2321. * @param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
  2322. * due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
  2323. * get the actual number.
  2324. * @param num_levels Number of block levels. The more levels, the more accurate is the segmentation,
  2325. * but needs more memory and CPU time.
  2326. * @param prior enable 3x3 shape smoothing term if \>0. A larger value leads to smoother shapes. prior
  2327. * must be in the range [0, 5].
  2328. * @param histogram_bins Number of histogram bins.
  2329. *
  2330. * The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
  2331. * the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
  2332. * superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
  2333. * double_step.
  2334. *
  2335. * The number of levels in num_levels defines the amount of block levels that the algorithm use in the
  2336. * optimization. The initialization is a grid, in which the superpixels are equally distributed through
  2337. * the width and the height of the image. The larger blocks correspond to the superpixel size, and the
  2338. * levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
  2339. * recursively until the smaller block level. An example of initialization of 4 block levels is
  2340. * illustrated in the following figure.
  2341. *
  2342. * ![image](pics/superpixels_blocks.png)
  2343. */
  2344. + (SuperpixelSEEDS*)createSuperpixelSEEDS:(int)image_width image_height:(int)image_height image_channels:(int)image_channels num_superpixels:(int)num_superpixels num_levels:(int)num_levels prior:(int)prior histogram_bins:(int)histogram_bins NS_SWIFT_NAME(createSuperpixelSEEDS(image_width:image_height:image_channels:num_superpixels:num_levels:prior:histogram_bins:));
  2345. /**
  2346. * Initializes a SuperpixelSEEDS object.
  2347. *
  2348. * @param image_width Image width.
  2349. * @param image_height Image height.
  2350. * @param image_channels Number of channels of the image.
  2351. * @param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
  2352. * due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
  2353. * get the actual number.
  2354. * @param num_levels Number of block levels. The more levels, the more accurate is the segmentation,
  2355. * but needs more memory and CPU time.
  2356. * @param prior enable 3x3 shape smoothing term if \>0. A larger value leads to smoother shapes. prior
  2357. * must be in the range [0, 5].
  2358. *
  2359. * The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
  2360. * the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
  2361. * superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
  2362. * double_step.
  2363. *
  2364. * The number of levels in num_levels defines the amount of block levels that the algorithm use in the
  2365. * optimization. The initialization is a grid, in which the superpixels are equally distributed through
  2366. * the width and the height of the image. The larger blocks correspond to the superpixel size, and the
  2367. * levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
  2368. * recursively until the smaller block level. An example of initialization of 4 block levels is
  2369. * illustrated in the following figure.
  2370. *
  2371. * ![image](pics/superpixels_blocks.png)
  2372. */
  2373. + (SuperpixelSEEDS*)createSuperpixelSEEDS:(int)image_width image_height:(int)image_height image_channels:(int)image_channels num_superpixels:(int)num_superpixels num_levels:(int)num_levels prior:(int)prior NS_SWIFT_NAME(createSuperpixelSEEDS(image_width:image_height:image_channels:num_superpixels:num_levels:prior:));
/**
 * Initializes a SuperpixelSEEDS object.
 *
 * @param image_width Image width.
 * @param image_height Image height.
 * @param image_channels Number of channels of the image.
 * @param num_superpixels Desired number of superpixels. Note that the actual number may be smaller
 * due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
 * get the actual number.
 * @param num_levels Number of block levels. The more levels, the more accurate is the segmentation,
 * but needs more memory and CPU time.
 *
 * The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of
 * the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS
 * superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and
 * double_step.
 *
 * The number of levels in num_levels defines the amount of block levels that the algorithm use in the
 * optimization. The initialization is a grid, in which the superpixels are equally distributed through
 * the width and the height of the image. The larger blocks correspond to the superpixel size, and the
 * levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels,
 * recursively until the smaller block level. An example of initialization of 4 block levels is
 * illustrated in the following figure.
 *
 * ![image](pics/superpixels_blocks.png)
 */
+ (SuperpixelSEEDS*)createSuperpixelSEEDS:(int)image_width image_height:(int)image_height image_channels:(int)image_channels num_superpixels:(int)num_superpixels num_levels:(int)num_levels NS_SWIFT_NAME(createSuperpixelSEEDS(image_width:image_height:image_channels:num_superpixels:num_levels:));
  2402. //
  2403. // Ptr_FastLineDetector cv::ximgproc::createFastLineDetector(int length_threshold = 10, float distance_threshold = 1.414213562f, double canny_th1 = 50.0, double canny_th2 = 50.0, int canny_aperture_size = 3, bool do_merge = false)
  2404. //
/**
 * Creates a smart pointer to a FastLineDetector object and initializes it
 *
 * @param length_threshold Segment shorter than this will be discarded
 * @param distance_threshold A point placed from a hypothesis line
 * segment farther than this will be regarded as an outlier
 * @param canny_th1 First threshold for hysteresis procedure in Canny()
 * @param canny_th2 Second threshold for hysteresis procedure in Canny()
 * @param canny_aperture_size Aperture size for the Sobel operator in Canny().
 * If zero, Canny() is not applied and the input image is taken as an edge image.
 * @param do_merge If true, incremental merging of segments will be performed
 */
+ (FastLineDetector*)createFastLineDetector:(int)length_threshold distance_threshold:(float)distance_threshold canny_th1:(double)canny_th1 canny_th2:(double)canny_th2 canny_aperture_size:(int)canny_aperture_size do_merge:(BOOL)do_merge NS_SWIFT_NAME(createFastLineDetector(length_threshold:distance_threshold:canny_th1:canny_th2:canny_aperture_size:do_merge:));
/**
 * Creates a smart pointer to a FastLineDetector object and initializes it
 *
 * @param length_threshold Segment shorter than this will be discarded
 * @param distance_threshold A point placed from a hypothesis line
 * segment farther than this will be regarded as an outlier
 * @param canny_th1 First threshold for hysteresis procedure in Canny()
 * @param canny_th2 Second threshold for hysteresis procedure in Canny()
 * @param canny_aperture_size Aperture size for the Sobel operator in Canny().
 * If zero, Canny() is not applied and the input image is taken as an edge image.
 */
+ (FastLineDetector*)createFastLineDetector:(int)length_threshold distance_threshold:(float)distance_threshold canny_th1:(double)canny_th1 canny_th2:(double)canny_th2 canny_aperture_size:(int)canny_aperture_size NS_SWIFT_NAME(createFastLineDetector(length_threshold:distance_threshold:canny_th1:canny_th2:canny_aperture_size:));
/**
 * Creates a smart pointer to a FastLineDetector object and initializes it
 *
 * @param length_threshold Segment shorter than this will be discarded
 * @param distance_threshold A point placed from a hypothesis line
 * segment farther than this will be regarded as an outlier
 * @param canny_th1 First threshold for hysteresis procedure in Canny()
 * @param canny_th2 Second threshold for hysteresis procedure in Canny()
 */
+ (FastLineDetector*)createFastLineDetector:(int)length_threshold distance_threshold:(float)distance_threshold canny_th1:(double)canny_th1 canny_th2:(double)canny_th2 NS_SWIFT_NAME(createFastLineDetector(length_threshold:distance_threshold:canny_th1:canny_th2:));
/**
 * Creates a smart pointer to a FastLineDetector object and initializes it
 *
 * @param length_threshold Segment shorter than this will be discarded
 * @param distance_threshold A point placed from a hypothesis line
 * segment farther than this will be regarded as an outlier
 * @param canny_th1 First threshold for hysteresis procedure in Canny()
 */
+ (FastLineDetector*)createFastLineDetector:(int)length_threshold distance_threshold:(float)distance_threshold canny_th1:(double)canny_th1 NS_SWIFT_NAME(createFastLineDetector(length_threshold:distance_threshold:canny_th1:));
/**
 * Creates a smart pointer to a FastLineDetector object and initializes it
 *
 * @param length_threshold Segment shorter than this will be discarded
 * @param distance_threshold A point placed from a hypothesis line
 * segment farther than this will be regarded as an outlier
 */
+ (FastLineDetector*)createFastLineDetector:(int)length_threshold distance_threshold:(float)distance_threshold NS_SWIFT_NAME(createFastLineDetector(length_threshold:distance_threshold:));
/**
 * Creates a smart pointer to a FastLineDetector object and initializes it
 *
 * @param length_threshold Segment shorter than this will be discarded
 */
+ (FastLineDetector*)createFastLineDetector:(int)length_threshold NS_SWIFT_NAME(createFastLineDetector(length_threshold:));
/**
 * Creates a smart pointer to a FastLineDetector object and initializes it
 * using default parameters.
 */
+ (FastLineDetector*)createFastLineDetector NS_SWIFT_NAME(createFastLineDetector());
  2475. //
  2476. // void cv::ximgproc::covarianceEstimation(Mat src, Mat& dst, int windowRows, int windowCols)
  2477. //
/**
 * Computes the estimated covariance matrix of an image using the sliding
 * window formulation.
 *
 * @param src The source image. Input image must be of a complex type.
 * @param dst The destination estimated covariance matrix. Output matrix will be size (windowRows*windowCols, windowRows*windowCols).
 * @param windowRows The number of rows in the window.
 * @param windowCols The number of cols in the window.
 * The window size parameters control the accuracy of the estimation.
 * The sliding window moves over the entire image from the top-left corner
 * to the bottom right corner. Each location of the window represents a sample.
 * If the window is the size of the image, then this gives the exact covariance matrix.
 * For all other cases, the sizes of the window will impact the number of samples
 * and the number of elements in the estimated covariance matrix.
 */
+ (void)covarianceEstimation:(Mat*)src dst:(Mat*)dst windowRows:(int)windowRows windowCols:(int)windowCols NS_SWIFT_NAME(covarianceEstimation(src:dst:windowRows:windowCols:));
  2494. @end
  2495. NS_ASSUME_NONNULL_END