//
// This file is auto-generated. Please don't modify it!
//
#pragma once
#ifdef __cplusplus
//#import "opencv.hpp"
#import "opencv2/bioinspired.hpp"
#import "opencv2/bioinspired/retina.hpp"
#else
#define CV_EXPORTS
#endif

#import <Foundation/Foundation.h>
#import "Algorithm.h"

@class Mat;
@class Size2i;

NS_ASSUME_NONNULL_BEGIN
// C++: class Retina
/**
 * class which allows the Gipsa/Listic Labs model to be used with OpenCV.
 *
 * This retina model allows spatio-temporal image processing (applied on still images, video sequences).
 * As a summary, these are the retina model properties:
 * - It applies a spectral whitening (mid-frequency details enhancement)
 * - high frequency spatio-temporal noise reduction
 * - low frequency luminance to be reduced (luminance range compression)
 * - local logarithmic luminance compression allows details to be enhanced in low light conditions
 *
 * USE : this model can be used basically for spatio-temporal video effects but also for :
 * _using the getParvo method output matrix : texture analysis with enhanced signal to noise ratio and enhanced details, robust against input image luminance ranges
 * _using the getMagno method output matrix : motion analysis also with the previously cited properties
 *
 * for more information, refer to the following papers :
 * Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
 *
 * The retina filter includes the research contributions of PhD/research colleagues from which code has been redrawn by the author :
 * take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene's color mosaicing/demosaicing and the reference paper:
 * B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
 * take a look at imagelogpolprojection.hpp to discover retina spatial log sampling, which originates from Barthelemy Durette's PhD work with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
 * more information in the above cited Jeanny Herault's book.
 *
 * Member of `Bioinspired`
 */
CV_EXPORTS @interface Retina : Algorithm

#ifdef __cplusplus
@property(readonly)cv::Ptr<cv::bioinspired::Retina> nativePtrRetina;
#endif

#ifdef __cplusplus
- (instancetype)initWithNativePtr:(cv::Ptr<cv::bioinspired::Retina>)nativePtr;
+ (instancetype)fromNative:(cv::Ptr<cv::bioinspired::Retina>)nativePtr;
#endif

#pragma mark - Methods

//
// Size cv::bioinspired::Retina::getInputSize()
//
/**
 * Retrieve retina input buffer size
 * @return the retina input buffer size
 */
- (Size2i*)getInputSize NS_SWIFT_NAME(getInputSize());

//
// Size cv::bioinspired::Retina::getOutputSize()
//
/**
 * Retrieve retina output buffer size, which can be different from the input if a spatial log
 * transformation is applied
 * @return the retina output buffer size
 */
- (Size2i*)getOutputSize NS_SWIFT_NAME(getOutputSize());
//
// void cv::bioinspired::Retina::setup(String retinaParameterFile = "", bool applyDefaultSetupOnFailure = true)
//
/**
 * Try to open an XML retina parameters file to adjust current retina instance setup
 *
 * - if the xml file does not exist, then default setup is applied
 * - warning, Exceptions are thrown if the read XML file is not valid
 * @param retinaParameterFile the parameters filename
 * @param applyDefaultSetupOnFailure set to true to fall back to the default setup if the file cannot be loaded
 *
 * You can retrieve the current parameters structure using the method Retina::getParameters and update
 * it before running method Retina::setup.
 */
- (void)setup:(NSString*)retinaParameterFile applyDefaultSetupOnFailure:(BOOL)applyDefaultSetupOnFailure NS_SWIFT_NAME(setup(retinaParameterFile:applyDefaultSetupOnFailure:));
/**
 * Try to open an XML retina parameters file to adjust current retina instance setup
 *
 * - if the xml file does not exist, then default setup is applied
 * - warning, Exceptions are thrown if the read XML file is not valid
 * @param retinaParameterFile the parameters filename
 *
 * You can retrieve the current parameters structure using the method Retina::getParameters and update
 * it before running method Retina::setup.
 */
- (void)setup:(NSString*)retinaParameterFile NS_SWIFT_NAME(setup(retinaParameterFile:));

/**
 * Try to open an XML retina parameters file to adjust current retina instance setup
 *
 * - if the xml file does not exist, then default setup is applied
 * - warning, Exceptions are thrown if the read XML file is not valid
 *
 * You can retrieve the current parameters structure using the method Retina::getParameters and update
 * it before running method Retina::setup.
 */
- (void)setup NS_SWIFT_NAME(setup());
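
// A minimal usage sketch for the setup overloads (illustration only, not part of the
// generated API). It assumes a `Retina *retina` instance already created elsewhere and a
// hypothetical parameter file path; with applyDefaultSetupOnFailure:YES the default setup
// is used when the file cannot be loaded.
//
//     NSString *paramsPath = @"retinaParams.xml";   // hypothetical file, e.g. produced earlier with -write:
//     [retina setup:paramsPath applyDefaultSetupOnFailure:YES];
//     NSLog(@"%@", [retina printSetup]);            // inspect the parameters actually in use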
//
// String cv::bioinspired::Retina::printSetup()
//
/**
 * Outputs a string showing the used parameters setup
 * @return a string which contains formatted parameters information
 */
- (NSString*)printSetup NS_SWIFT_NAME(printSetup());

//
// void cv::bioinspired::Retina::write(String fs)
//
/**
 * Write xml/yml formatted parameters information
 * @param fs the filename of the xml file that will be opened and written with formatted parameters
 * information
 */
- (void)write:(NSString*)fs NS_SWIFT_NAME(write(fs:));
//
// void cv::bioinspired::Retina::setupOPLandIPLParvoChannel(bool colorMode = true, bool normaliseOutput = true, float photoreceptorsLocalAdaptationSensitivity = 0.7f, float photoreceptorsTemporalConstant = 0.5f, float photoreceptorsSpatialConstant = 0.53f, float horizontalCellsGain = 0.f, float HcellsTemporalConstant = 1.f, float HcellsSpatialConstant = 7.f, float ganglionCellsSensitivity = 0.7f)
//
/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity, range is 0-1
 * (more log compression effect when the value increases)
 * @param photoreceptorsTemporalConstant the time constant of the first order low pass filter of
 * the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is
 * frames, typical value is 1 frame
 * @param photoreceptorsSpatialConstant the spatial constant of the first order low pass filter of
 * the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is
 * pixels, typical value is 1 pixel
 * @param horizontalCellsGain gain of the horizontal cells network: if 0, then the mean value of
 * the output is zero; if the parameter is near 1, then the luminance is not filtered and is
 * still reachable at the output, typical value is 0
 * @param HcellsTemporalConstant the time constant of the first order low pass filter of the
 * horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is
 * frames, typical value is 1 frame, as for the photoreceptors
 * @param HcellsSpatialConstant the spatial constant of the first order low pass filter of the
 * horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels,
 * typical value is 5 pixels; this value is also used for local contrast computing when computing
 * the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular
 * channel model)
 * @param ganglionCellsSensitivity the compression strength of the ganglion cells local adaptation
 * output, set a value between 0.6 and 1 for best results; a high value increases more the low
 * value sensitivity... and the output saturates faster, recommended value: 0.7
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput photoreceptorsLocalAdaptationSensitivity:(float)photoreceptorsLocalAdaptationSensitivity photoreceptorsTemporalConstant:(float)photoreceptorsTemporalConstant photoreceptorsSpatialConstant:(float)photoreceptorsSpatialConstant horizontalCellsGain:(float)horizontalCellsGain HcellsTemporalConstant:(float)HcellsTemporalConstant HcellsSpatialConstant:(float)HcellsSpatialConstant ganglionCellsSensitivity:(float)ganglionCellsSensitivity NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:photoreceptorsLocalAdaptationSensitivity:photoreceptorsTemporalConstant:photoreceptorsSpatialConstant:horizontalCellsGain:HcellsTemporalConstant:HcellsSpatialConstant:ganglionCellsSensitivity:));
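
// A sketch of a full parvo-channel configuration call (illustration only). The values below
// are simply the defaults listed in the C++ signature comment above; tune them per the
// parameter descriptions. A `Retina *retina` instance is assumed to exist.
//
//     [retina setupOPLandIPLParvoChannel:YES
//                        normaliseOutput:YES
//     photoreceptorsLocalAdaptationSensitivity:0.7f
//         photoreceptorsTemporalConstant:0.5f
//          photoreceptorsSpatialConstant:0.53f
//                    horizontalCellsGain:0.0f
//                 HcellsTemporalConstant:1.0f
//                  HcellsSpatialConstant:7.0f
//               ganglionCellsSensitivity:0.7f];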
/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity, range is 0-1
 * (more log compression effect when the value increases)
 * @param photoreceptorsTemporalConstant the time constant of the first order low pass filter of
 * the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is
 * frames, typical value is 1 frame
 * @param photoreceptorsSpatialConstant the spatial constant of the first order low pass filter of
 * the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is
 * pixels, typical value is 1 pixel
 * @param horizontalCellsGain gain of the horizontal cells network: if 0, then the mean value of
 * the output is zero; if the parameter is near 1, then the luminance is not filtered and is
 * still reachable at the output, typical value is 0
 * @param HcellsTemporalConstant the time constant of the first order low pass filter of the
 * horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is
 * frames, typical value is 1 frame, as for the photoreceptors
 * @param HcellsSpatialConstant the spatial constant of the first order low pass filter of the
 * horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels,
 * typical value is 5 pixels; this value is also used for local contrast computing when computing
 * the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular
 * channel model)
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput photoreceptorsLocalAdaptationSensitivity:(float)photoreceptorsLocalAdaptationSensitivity photoreceptorsTemporalConstant:(float)photoreceptorsTemporalConstant photoreceptorsSpatialConstant:(float)photoreceptorsSpatialConstant horizontalCellsGain:(float)horizontalCellsGain HcellsTemporalConstant:(float)HcellsTemporalConstant HcellsSpatialConstant:(float)HcellsSpatialConstant NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:photoreceptorsLocalAdaptationSensitivity:photoreceptorsTemporalConstant:photoreceptorsSpatialConstant:horizontalCellsGain:HcellsTemporalConstant:HcellsSpatialConstant:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity, range is 0-1
 * (more log compression effect when the value increases)
 * @param photoreceptorsTemporalConstant the time constant of the first order low pass filter of
 * the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is
 * frames, typical value is 1 frame
 * @param photoreceptorsSpatialConstant the spatial constant of the first order low pass filter of
 * the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is
 * pixels, typical value is 1 pixel
 * @param horizontalCellsGain gain of the horizontal cells network: if 0, then the mean value of
 * the output is zero; if the parameter is near 1, then the luminance is not filtered and is
 * still reachable at the output, typical value is 0
 * @param HcellsTemporalConstant the time constant of the first order low pass filter of the
 * horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is
 * frames, typical value is 1 frame, as for the photoreceptors
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput photoreceptorsLocalAdaptationSensitivity:(float)photoreceptorsLocalAdaptationSensitivity photoreceptorsTemporalConstant:(float)photoreceptorsTemporalConstant photoreceptorsSpatialConstant:(float)photoreceptorsSpatialConstant horizontalCellsGain:(float)horizontalCellsGain HcellsTemporalConstant:(float)HcellsTemporalConstant NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:photoreceptorsLocalAdaptationSensitivity:photoreceptorsTemporalConstant:photoreceptorsSpatialConstant:horizontalCellsGain:HcellsTemporalConstant:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity, range is 0-1
 * (more log compression effect when the value increases)
 * @param photoreceptorsTemporalConstant the time constant of the first order low pass filter of
 * the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is
 * frames, typical value is 1 frame
 * @param photoreceptorsSpatialConstant the spatial constant of the first order low pass filter of
 * the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is
 * pixels, typical value is 1 pixel
 * @param horizontalCellsGain gain of the horizontal cells network: if 0, then the mean value of
 * the output is zero; if the parameter is near 1, then the luminance is not filtered and is
 * still reachable at the output, typical value is 0
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput photoreceptorsLocalAdaptationSensitivity:(float)photoreceptorsLocalAdaptationSensitivity photoreceptorsTemporalConstant:(float)photoreceptorsTemporalConstant photoreceptorsSpatialConstant:(float)photoreceptorsSpatialConstant horizontalCellsGain:(float)horizontalCellsGain NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:photoreceptorsLocalAdaptationSensitivity:photoreceptorsTemporalConstant:photoreceptorsSpatialConstant:horizontalCellsGain:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity, range is 0-1
 * (more log compression effect when the value increases)
 * @param photoreceptorsTemporalConstant the time constant of the first order low pass filter of
 * the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is
 * frames, typical value is 1 frame
 * @param photoreceptorsSpatialConstant the spatial constant of the first order low pass filter of
 * the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is
 * pixels, typical value is 1 pixel
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput photoreceptorsLocalAdaptationSensitivity:(float)photoreceptorsLocalAdaptationSensitivity photoreceptorsTemporalConstant:(float)photoreceptorsTemporalConstant photoreceptorsSpatialConstant:(float)photoreceptorsSpatialConstant NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:photoreceptorsLocalAdaptationSensitivity:photoreceptorsTemporalConstant:photoreceptorsSpatialConstant:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity, range is 0-1
 * (more log compression effect when the value increases)
 * @param photoreceptorsTemporalConstant the time constant of the first order low pass filter of
 * the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is
 * frames, typical value is 1 frame
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput photoreceptorsLocalAdaptationSensitivity:(float)photoreceptorsLocalAdaptationSensitivity photoreceptorsTemporalConstant:(float)photoreceptorsTemporalConstant NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:photoreceptorsLocalAdaptationSensitivity:photoreceptorsTemporalConstant:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param photoreceptorsLocalAdaptationSensitivity the photoreceptors sensitivity, range is 0-1
 * (more log compression effect when the value increases)
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput photoreceptorsLocalAdaptationSensitivity:(float)photoreceptorsLocalAdaptationSensitivity NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:photoreceptorsLocalAdaptationSensitivity:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode normaliseOutput:(BOOL)normaliseOutput NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:normaliseOutput:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 * @param colorMode specifies if (true) color is processed or (false) a gray level image is processed
 */
- (void)setupOPLandIPLParvoChannel:(BOOL)colorMode NS_SWIFT_NAME(setupOPLandIPLParvoChannel(colorMode:));

/**
 * Setup the OPL and IPL parvo channels (see biological model)
 *
 * OPL is referred to as the Outer Plexiform Layer of the retina; it allows the spatio-temporal filtering
 * which whitens the spectrum and reduces spatio-temporal noise while attenuating global luminance
 * (low frequency energy). IPL parvo is the OPL's next processing stage; it refers to a part of the
 * Inner Plexiform layer of the retina and allows high contours sensitivity in foveal vision. See
 * reference papers for more information.
 * For more information, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
 */
- (void)setupOPLandIPLParvoChannel NS_SWIFT_NAME(setupOPLandIPLParvoChannel());
//
// void cv::bioinspired::Retina::setupIPLMagnoChannel(bool normaliseOutput = true, float parasolCells_beta = 0.f, float parasolCells_tau = 0.f, float parasolCells_k = 7.f, float amacrinCellsTemporalCutFrequency = 1.2f, float V0CompressionParameter = 0.95f, float localAdaptintegration_tau = 0.f, float localAdaptintegration_k = 7.f)
//
/**
 * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
 *
 * this channel processes signals output from the OPL processing stage in peripheral vision; it allows
 * motion information enhancement. It is decorrelated from the details channel. See reference
 * papers for more details.
 *
 * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 or not (false)
 * @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
 * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
 * @param parasolCells_tau the low pass filter time constant used for local contrast adaptation
 * at the IPL level of the retina (for ganglion cells local adaptation), unit is frames, typical
 * value is 0 (immediate response)
 * @param parasolCells_k the low pass filter spatial constant used for local contrast adaptation
 * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
 * value is 5
 * @param amacrinCellsTemporalCutFrequency the time constant of the first order high pass filter of
 * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
 * @param V0CompressionParameter the compression strength of the ganglion cells local adaptation
 * output, set a value between 0.6 and 1 for best results; a high value increases more the low
 * value sensitivity... and the output saturates faster, recommended value: 0.95
 * @param localAdaptintegration_tau specifies the temporal constant of the low pass filter
 * involved in the computation of the local "motion mean" for the local adaptation computation
 * @param localAdaptintegration_k specifies the spatial constant of the low pass filter involved
 * in the computation of the local "motion mean" for the local adaptation computation
 */
- (void)setupIPLMagnoChannel:(BOOL)normaliseOutput parasolCells_beta:(float)parasolCells_beta parasolCells_tau:(float)parasolCells_tau parasolCells_k:(float)parasolCells_k amacrinCellsTemporalCutFrequency:(float)amacrinCellsTemporalCutFrequency V0CompressionParameter:(float)V0CompressionParameter localAdaptintegration_tau:(float)localAdaptintegration_tau localAdaptintegration_k:(float)localAdaptintegration_k NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:parasolCells_beta:parasolCells_tau:parasolCells_k:amacrinCellsTemporalCutFrequency:V0CompressionParameter:localAdaptintegration_tau:localAdaptintegration_k:));
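
// A sketch of a magno-channel configuration call (illustration only), again using the
// defaults from the C++ signature comment above; a `Retina *retina` instance is assumed.
//
//     [retina setupIPLMagnoChannel:YES
//                parasolCells_beta:0.0f
//                 parasolCells_tau:0.0f
//                   parasolCells_k:7.0f
//     amacrinCellsTemporalCutFrequency:1.2f
//           V0CompressionParameter:0.95f
//        localAdaptintegration_tau:0.0f
//          localAdaptintegration_k:7.0f];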
  471. /**
  472. * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
  473. *
  474. * this channel processes signals output from OPL processing stage in peripheral vision, it allows
  475. * motion information enhancement. It is decorrelated from the details channel. See reference
  476. * papers for more details.
  477. *
  478. * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)
  479. * @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
  480. * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
  481. * @param parasolCells_tau the low pass filter time constant used for local contrast adaptation
  482. * at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
  483. * value is 0 (immediate response)
  484. * @param parasolCells_k the low pass filter spatial constant used for local contrast adaptation
  485. * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
  486. * value is 5
  487. * @param amacrinCellsTemporalCutFrequency the time constant of the first order high pass fiter of
  488. * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
  489. * @param V0CompressionParameter the compression strengh of the ganglion cells local adaptation
  490. * output, set a value between 0.6 and 1 for best results, a high value increases more the low
  491. * value sensitivity... and the output saturates faster, recommended value: 0.95
  492. * @param localAdaptintegration_tau specifies the temporal constant of the low pas filter
  493. * involved in the computation of the local "motion mean" for the local adaptation computation
  494. * in the computation of the local "motion mean" for the local adaptation computation
  495. */
  496. - (void)setupIPLMagnoChannel:(BOOL)normaliseOutput parasolCells_beta:(float)parasolCells_beta parasolCells_tau:(float)parasolCells_tau parasolCells_k:(float)parasolCells_k amacrinCellsTemporalCutFrequency:(float)amacrinCellsTemporalCutFrequency V0CompressionParameter:(float)V0CompressionParameter localAdaptintegration_tau:(float)localAdaptintegration_tau NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:parasolCells_beta:parasolCells_tau:parasolCells_k:amacrinCellsTemporalCutFrequency:V0CompressionParameter:localAdaptintegration_tau:));
  497. /**
  498. * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
  499. *
  500. * this channel processes signals output from OPL processing stage in peripheral vision, it allows
  501. * motion information enhancement. It is decorrelated from the details channel. See reference
  502. * papers for more details.
  503. *
  504. * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)
  505. * @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
  506. * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
  507. * @param parasolCells_tau the low pass filter time constant used for local contrast adaptation
  508. * at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
  509. * value is 0 (immediate response)
  510. * @param parasolCells_k the low pass filter spatial constant used for local contrast adaptation
  511. * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
  512. * value is 5
  513. * @param amacrinCellsTemporalCutFrequency the time constant of the first order high pass fiter of
  514. * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
  515. * @param V0CompressionParameter the compression strengh of the ganglion cells local adaptation
  516. * output, set a value between 0.6 and 1 for best results, a high value increases more the low
  517. * value sensitivity... and the output saturates faster, recommended value: 0.95
  518. * involved in the computation of the local "motion mean" for the local adaptation computation
  519. * in the computation of the local "motion mean" for the local adaptation computation
  520. */
  521. - (void)setupIPLMagnoChannel:(BOOL)normaliseOutput parasolCells_beta:(float)parasolCells_beta parasolCells_tau:(float)parasolCells_tau parasolCells_k:(float)parasolCells_k amacrinCellsTemporalCutFrequency:(float)amacrinCellsTemporalCutFrequency V0CompressionParameter:(float)V0CompressionParameter NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:parasolCells_beta:parasolCells_tau:parasolCells_k:amacrinCellsTemporalCutFrequency:V0CompressionParameter:));
  522. /**
  523. * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
  524. *
  525. * this channel processes signals output from OPL processing stage in peripheral vision, it allows
  526. * motion information enhancement. It is decorrelated from the details channel. See reference
  527. * papers for more details.
  528. *
  529. * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)
  530. * @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
  531. * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
  532. * @param parasolCells_tau the low pass filter time constant used for local contrast adaptation
  533. * at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
  534. * value is 0 (immediate response)
  535. * @param parasolCells_k the low pass filter spatial constant used for local contrast adaptation
  536. * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
  537. * value is 5
  538. * @param amacrinCellsTemporalCutFrequency the time constant of the first order high pass fiter of
  539. * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
  540. * output, set a value between 0.6 and 1 for best results, a high value increases more the low
  541. * value sensitivity... and the output saturates faster, recommended value: 0.95
  542. * involved in the computation of the local "motion mean" for the local adaptation computation
  543. * in the computation of the local "motion mean" for the local adaptation computation
  544. */
  545. - (void)setupIPLMagnoChannel:(BOOL)normaliseOutput parasolCells_beta:(float)parasolCells_beta parasolCells_tau:(float)parasolCells_tau parasolCells_k:(float)parasolCells_k amacrinCellsTemporalCutFrequency:(float)amacrinCellsTemporalCutFrequency NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:parasolCells_beta:parasolCells_tau:parasolCells_k:amacrinCellsTemporalCutFrequency:));
  546. /**
  547. * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
  548. *
  549. * this channel processes signals output from OPL processing stage in peripheral vision, it allows
  550. * motion information enhancement. It is decorrelated from the details channel. See reference
  551. * papers for more details.
  552. *
  553. * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)
  554. * @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
  555. * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
  556. * @param parasolCells_tau the low pass filter time constant used for local contrast adaptation
  557. * at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
  558. * value is 0 (immediate response)
  559. * @param parasolCells_k the low pass filter spatial constant used for local contrast adaptation
  560. * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
  561. * value is 5
  562. * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
  563. * output, set a value between 0.6 and 1 for best results, a high value increases more the low
  564. * value sensitivity... and the output saturates faster, recommended value: 0.95
  565. * involved in the computation of the local "motion mean" for the local adaptation computation
  566. * in the computation of the local "motion mean" for the local adaptation computation
  567. */
  568. - (void)setupIPLMagnoChannel:(BOOL)normaliseOutput parasolCells_beta:(float)parasolCells_beta parasolCells_tau:(float)parasolCells_tau parasolCells_k:(float)parasolCells_k NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:parasolCells_beta:parasolCells_tau:parasolCells_k:));
  569. /**
  570. * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
  571. *
  572. * this channel processes signals output from OPL processing stage in peripheral vision, it allows
  573. * motion information enhancement. It is decorrelated from the details channel. See reference
  574. * papers for more details.
  575. *
  576. * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)
  577. * @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
  578. * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
  579. * @param parasolCells_tau the low pass filter time constant used for local contrast adaptation
  580. * at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
  581. * value is 0 (immediate response)
  582. * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
  583. * value is 5
  584. * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
  585. * output, set a value between 0.6 and 1 for best results, a high value increases more the low
  586. * value sensitivity... and the output saturates faster, recommended value: 0.95
  587. * involved in the computation of the local "motion mean" for the local adaptation computation
  588. * in the computation of the local "motion mean" for the local adaptation computation
  589. */
  590. - (void)setupIPLMagnoChannel:(BOOL)normaliseOutput parasolCells_beta:(float)parasolCells_beta parasolCells_tau:(float)parasolCells_tau NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:parasolCells_beta:parasolCells_tau:));
  591. /**
  592. * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
  593. *
  594. * this channel processes signals output from OPL processing stage in peripheral vision, it allows
  595. * motion information enhancement. It is decorrelated from the details channel. See reference
  596. * papers for more details.
  597. *
  598. * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)
  599. * @param parasolCells_beta the low pass filter gain used for local contrast adaptation at the
  600. * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
  601. * at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
  602. * value is 0 (immediate response)
  603. * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
  604. * value is 5
  605. * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
  606. * output, set a value between 0.6 and 1 for best results, a high value increases more the low
  607. * value sensitivity... and the output saturates faster, recommended value: 0.95
  608. * involved in the computation of the local "motion mean" for the local adaptation computation
  609. * in the computation of the local "motion mean" for the local adaptation computation
  610. */
  611. - (void)setupIPLMagnoChannel:(BOOL)normaliseOutput parasolCells_beta:(float)parasolCells_beta NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:parasolCells_beta:));
  612. /**
  613. * Set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
  614. *
  615. * this channel processes signals output from OPL processing stage in peripheral vision, it allows
  616. * motion information enhancement. It is decorrelated from the details channel. See reference
  617. * papers for more details.
  618. *
  619. * @param normaliseOutput specifies if (true) output is rescaled between 0 and 255 of not (false)
  620. * IPL level of the retina (for ganglion cells local adaptation), typical value is 0
  621. * at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical
  622. * value is 0 (immediate response)
  623. * at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical
  624. * value is 5
  625. * the magnocellular way (motion information channel), unit is frames, typical value is 1.2
  626. * output, set a value between 0.6 and 1 for best results, a high value increases more the low
  627. * value sensitivity... and the output saturates faster, recommended value: 0.95
  628. * involved in the computation of the local "motion mean" for the local adaptation computation
  629. * in the computation of the local "motion mean" for the local adaptation computation
  630. */
  631. - (void)setupIPLMagnoChannel:(BOOL)normaliseOutput NS_SWIFT_NAME(setupIPLMagnoChannel(normaliseOutput:));
/**
 * Set parameter values for the Inner Plexiform Layer (IPL) magnocellular channel
 *
 * This channel processes signals output from the OPL processing stage in peripheral vision; it
 * allows motion information enhancement. It is decorrelated from the details channel. See the
 * reference papers for more details.
 *
 * All parameters of the full setupIPLMagnoChannel overload keep their default values; see the
 * full overload for their descriptions.
 */
- (void)setupIPLMagnoChannel NS_SWIFT_NAME(setupIPLMagnoChannel());
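/*
 Configuration sketch (not part of the generated API): the values below simply restate the
 "typical" values quoted in the documentation above; `retina` is assumed to be a Retina instance
 obtained from one of the create factory methods declared further down in this header.

     // keep the magno output rescaled to [0;255] and use the typical local adaptation gain of 0
     [retina setupIPLMagnoChannel:YES parasolCells_beta:0.0f];

     // or simply reset the channel to its default configuration
     [retina setupIPLMagnoChannel];
*/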
//
// void cv::bioinspired::Retina::run(Mat inputImage)
//
/**
 * Method which allows retina to be applied to an input image.
 *
 * After run, the encapsulated retina module is ready to deliver its outputs using the dedicated
 * accessors; see the getParvo and getMagno methods.
 * @param inputImage the input Mat image to be processed, can be gray level or BGR coded in any
 * format (from 8 bits to 16 bits)
 */
- (void)run:(Mat*)inputImage NS_SWIFT_NAME(run(inputImage:));
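/*
 Usage sketch: `frame` is assumed to be a Mat delivered by your own capture or decoding code and
 sized like the inputSize the retina was created with.

     [retina run:frame];   // feed one frame; call once per frame of a video stream
     // outputs can now be fetched with the getParvo / getMagno accessors below
*/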
//
// void cv::bioinspired::Retina::applyFastToneMapping(Mat inputImage, Mat& outputToneMappedImage)
//
/**
 * Method which processes an image with the aim of correcting its luminance: correct
 * backlight problems and enhance details in shadows.
 *
 * This method is designed to perform High Dynamic Range image tone mapping (compress \>8bit/pixel
 * images to 8bit/pixel). This is a simplified version of the Retina Parvocellular model
 * (simplified version of the run/getParvo methods call) since it does not include the
 * spatio-temporal filter modelling the Outer Plexiform Layer of the retina that performs spectral
 * whitening and other processing steps. However, it works well for tone mapping and is faster.
 *
 * Check the demos and experiments section to see examples and the way to perform tone mapping
 * using the original retina model and this method.
 *
 * @param inputImage the input image to process (should be coded in float format: CV_32F,
 * CV_32FC1, CV_32FC3, CV_32FC4; the 4th channel won't be considered).
 * @param outputToneMappedImage the output 8bit/channel tone mapped image (CV_8U or CV_8UC3 format).
 */
- (void)applyFastToneMapping:(Mat*)inputImage outputToneMappedImage:(Mat*)outputToneMappedImage NS_SWIFT_NAME(applyFastToneMapping(inputImage:outputToneMappedImage:));
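/*
 Tone-mapping sketch: `hdrFrame` is assumed to be a float Mat (CV_32F / CV_32FC3) produced by your
 own HDR loading code, and `[Mat new]` assumes the default Mat initializer from the companion
 Mat.h header.

     Mat *ldr = [Mat new];   // output buffer, reallocated by the call as needed
     [retina applyFastToneMapping:hdrFrame outputToneMappedImage:ldr];
     // `ldr` now holds an 8 bit/channel tone mapped image (CV_8U or CV_8UC3)
*/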
//
// void cv::bioinspired::Retina::getParvo(Mat& retinaOutput_parvo)
//
/**
 * Accessor of the details channel of the retina (models foveal vision).
 *
 * Warning: the getParvoRAW methods return buffers that are not rescaled within the range [0;255],
 * while the non-RAW method allows a normalized matrix to be retrieved.
 *
 * @param retinaOutput_parvo the output buffer (reallocated if necessary), format can be:
 * - a Mat, this output is rescaled for standard 8-bit image processing use in OpenCV
 * - RAW methods actually return a 1D matrix (encoding is R1, R2, ... Rn, G1, G2, ..., Gn, B1,
 * B2, ...Bn), this output is the original retina filter model output, without any
 * quantification or rescaling.
 * @see `-getParvoRAW:`
 */
- (void)getParvo:(Mat*)retinaOutput_parvo NS_SWIFT_NAME(getParvo(retinaOutput_parvo:));
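/*
 Retrieval sketch for the details (parvo) channel, to be called after [retina run:frame];
 `[Mat new]` is assumed to be the default Mat initializer from the companion Mat.h header.

     Mat *parvo = [Mat new];
     [retina getParvo:parvo];   // normalized output, ready for 8-bit display or further processing
*/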
//
// void cv::bioinspired::Retina::getParvoRAW(Mat& retinaOutput_parvo)
//
/**
 * Accessor of the details channel of the retina (models foveal vision).
 * @see `-getParvo:`
 */
- (void)getParvoRAW:(Mat*)retinaOutput_parvo NS_SWIFT_NAME(getParvoRAW(retinaOutput_parvo:));
//
// void cv::bioinspired::Retina::getMagno(Mat& retinaOutput_magno)
//
/**
 * Accessor of the motion channel of the retina (models peripheral vision).
 *
 * Warning: the getMagnoRAW methods return buffers that are not rescaled within the range [0;255],
 * while the non-RAW method allows a normalized matrix to be retrieved.
 * @param retinaOutput_magno the output buffer (reallocated if necessary), format can be:
 * - a Mat, this output is rescaled for standard 8-bit image processing use in OpenCV
 * - RAW methods actually return a 1D matrix (encoding is M1, M2, ... Mn), this output is the
 * original retina filter model output, without any quantification or rescaling.
 * @see `-getMagnoRAW:`
 */
- (void)getMagno:(Mat*)retinaOutput_magno NS_SWIFT_NAME(getMagno(retinaOutput_magno:));
//
// void cv::bioinspired::Retina::getMagnoRAW(Mat& retinaOutput_magno)
//
/**
 * Accessor of the motion channel of the retina (models peripheral vision).
 * @see `-getMagno:`
 */
- (void)getMagnoRAW:(Mat*)retinaOutput_magno NS_SWIFT_NAME(getMagnoRAW(retinaOutput_magno:));
//
// Mat cv::bioinspired::Retina::getMagnoRAW()
//
- (Mat*)getMagnoRAW NS_SWIFT_NAME(getMagnoRAW());
//
// Mat cv::bioinspired::Retina::getParvoRAW()
//
- (Mat*)getParvoRAW NS_SWIFT_NAME(getParvoRAW());
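/*
 RAW accessor sketch: the return-value variants hand back the unscaled 1D filter output directly,
 which is convenient when no preallocated buffer is needed; no names beyond this header are
 assumed here.

     Mat *parvoRaw = [retina getParvoRAW];   // original filter output, no rescaling/quantification
     Mat *magnoRaw = [retina getMagnoRAW];
*/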
//
// void cv::bioinspired::Retina::setColorSaturation(bool saturateColors = true, float colorSaturationValue = 4.0f)
//
/**
 * Activate color saturation as the final step of the color demultiplexing process -\> this
 * saturation is a sigmoid function applied to each channel of the demultiplexed image.
 * @param saturateColors boolean that activates color saturation (if true) or deactivates it (if false)
 * @param colorSaturationValue the saturation factor: a simple factor applied on the chrominance
 * buffers
 */
- (void)setColorSaturation:(BOOL)saturateColors colorSaturationValue:(float)colorSaturationValue NS_SWIFT_NAME(setColorSaturation(saturateColors:colorSaturationValue:));
/**
 * Activate color saturation as the final step of the color demultiplexing process -\> this
 * saturation is a sigmoid function applied to each channel of the demultiplexed image.
 * @param saturateColors boolean that activates color saturation (if true) or deactivates it (if false)
 *
 * colorSaturationValue keeps its default value (4.0).
 */
- (void)setColorSaturation:(BOOL)saturateColors NS_SWIFT_NAME(setColorSaturation(saturateColors:));
/**
 * Activate color saturation as the final step of the color demultiplexing process -\> this
 * saturation is a sigmoid function applied to each channel of the demultiplexed image.
 *
 * Both parameters keep their default values (saturateColors = true, colorSaturationValue = 4.0).
 */
- (void)setColorSaturation NS_SWIFT_NAME(setColorSaturation());
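/*
 Sketch: strengthen chrominance on the demultiplexed color output; 4.0 simply restates the default
 quoted in the C++ signature above, and a larger value saturates colors more strongly.

     [retina setColorSaturation:YES colorSaturationValue:4.0f];
*/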
//
// void cv::bioinspired::Retina::clearBuffers()
//
/**
 * Clears all retina buffers
 *
 * (equivalent to opening the eyes after a long period of eye closure ;o) Watch out for the
 * temporal transition occurring just after this method call.
 */
- (void)clearBuffers NS_SWIFT_NAME(clearBuffers());
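/*
 Sketch: reset the temporal state when the input stream is cut or a new video starts, so the
 previous sequence does not leak into the first frames of the next one; `firstFrameOfNewSequence`
 is a placeholder for your own Mat.

     [retina clearBuffers];
     [retina run:firstFrameOfNewSequence];   // expect a short transient while the buffers refill
*/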
//
// void cv::bioinspired::Retina::activateMovingContoursProcessing(bool activate)
//
/**
 * Activate/deactivate the Magnocellular pathway processing (motion information extraction); by
 * default, it is activated
 * @param activate true if the Magnocellular output should be activated, false if not... if activated,
 * the Magnocellular output can be retrieved using the **getMagno** methods
 */
- (void)activateMovingContoursProcessing:(BOOL)activate NS_SWIFT_NAME(activateMovingContoursProcessing(activate:));
//
// void cv::bioinspired::Retina::activateContoursProcessing(bool activate)
//
/**
 * Activate/deactivate the Parvocellular pathway processing (contours information extraction); by
 * default, it is activated
 * @param activate true if the Parvocellular (contours information extraction) output should be
 * activated, false if not... if activated, the Parvocellular output can be retrieved using the
 * Retina::getParvo methods
 */
- (void)activateContoursProcessing:(BOOL)activate NS_SWIFT_NAME(activateContoursProcessing(activate:));
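/*
 Sketch: disable the pathway you do not consume to save processing time; both pathways are
 activated by default.

     [retina activateContoursProcessing:YES];        // keep the details (parvo) output
     [retina activateMovingContoursProcessing:NO];   // skip the motion (magno) output
*/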
//
// static Ptr_Retina cv::bioinspired::Retina::create(Size inputSize)
//
+ (Retina*)create:(Size2i*)inputSize NS_SWIFT_NAME(create(inputSize:));
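/*
 Minimal creation sketch: a retina sized to the input frames, with all other settings left at
 their defaults; Size2i's initWithWidth:height: initializer is assumed from the companion
 Size2i header.

     Retina *retina = [Retina create:[[Size2i alloc] initWithWidth:640 height:480]];
*/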
//
// static Ptr_Retina cv::bioinspired::Retina::create(Size inputSize, bool colorMode, int colorSamplingMethod = RETINA_COLOR_BAYER, bool useRetinaLogSampling = false, float reductionFactor = 1.0f, float samplingStrength = 10.0f)
//
/**
 * Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance
 *
 * @param inputSize the input frame size
 * @param colorMode the chosen processing mode: with or without color processing
 * @param colorSamplingMethod specifies which kind of color sampling will be used:
 * - cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice
 * - cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3 GBRGBRGBR...
 * - cv::bioinspired::RETINA_COLOR_BAYER: standard Bayer sampling
 * @param useRetinaLogSampling activate retina log sampling; if true, the 2 following parameters can
 * be used
 * @param reductionFactor only useful if param useRetinaLogSampling=true, specifies the reduction
 * factor of the output frame (as the center (fovea) is high resolution and corners can be
 * underscaled, a reduction of the output is allowed without precision loss)
 * @param samplingStrength only useful if param useRetinaLogSampling=true, specifies the strength of
 * the log scale that is applied
 */
+ (Retina*)create:(Size2i*)inputSize colorMode:(BOOL)colorMode colorSamplingMethod:(int)colorSamplingMethod useRetinaLogSampling:(BOOL)useRetinaLogSampling reductionFactor:(float)reductionFactor samplingStrength:(float)samplingStrength NS_SWIFT_NAME(create(inputSize:colorMode:colorSamplingMethod:useRetinaLogSampling:reductionFactor:samplingStrength:));
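/*
 Full-parameter creation sketch: a color retina with retinal log sampling enabled so the periphery
 is downscaled. How the RETINA_COLOR_* constants are exposed to Objective-C is an assumption here;
 they mirror the cv::bioinspired enum values named in the documentation above.

     Retina *retina = [Retina create:[[Size2i alloc] initWithWidth:1280 height:720]
                           colorMode:YES
                 colorSamplingMethod:RETINA_COLOR_BAYER   // assumed constant, see note above
                useRetinaLogSampling:YES
                     reductionFactor:1.0f
                    samplingStrength:10.0f];
*/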
/**
 * Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance
 *
 * @param inputSize the input frame size
 * @param colorMode the chosen processing mode: with or without color processing
 * @param colorSamplingMethod specifies which kind of color sampling will be used:
 * - cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice
 * - cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3 GBRGBRGBR...
 * - cv::bioinspired::RETINA_COLOR_BAYER: standard Bayer sampling
 * @param useRetinaLogSampling activate retina log sampling; if true, the following parameter can
 * be used
 * @param reductionFactor only useful if param useRetinaLogSampling=true, specifies the reduction
 * factor of the output frame (as the center (fovea) is high resolution and corners can be
 * underscaled, a reduction of the output is allowed without precision loss)
 *
 * samplingStrength keeps its default value (10.0).
 */
+ (Retina*)create:(Size2i*)inputSize colorMode:(BOOL)colorMode colorSamplingMethod:(int)colorSamplingMethod useRetinaLogSampling:(BOOL)useRetinaLogSampling reductionFactor:(float)reductionFactor NS_SWIFT_NAME(create(inputSize:colorMode:colorSamplingMethod:useRetinaLogSampling:reductionFactor:));
/**
 * Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance
 *
 * @param inputSize the input frame size
 * @param colorMode the chosen processing mode: with or without color processing
 * @param colorSamplingMethod specifies which kind of color sampling will be used:
 * - cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice
 * - cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3 GBRGBRGBR...
 * - cv::bioinspired::RETINA_COLOR_BAYER: standard Bayer sampling
 * @param useRetinaLogSampling activate retina log sampling
 *
 * reductionFactor and samplingStrength keep their default values (1.0 and 10.0).
 */
+ (Retina*)create:(Size2i*)inputSize colorMode:(BOOL)colorMode colorSamplingMethod:(int)colorSamplingMethod useRetinaLogSampling:(BOOL)useRetinaLogSampling NS_SWIFT_NAME(create(inputSize:colorMode:colorSamplingMethod:useRetinaLogSampling:));
/**
 * Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance
 *
 * @param inputSize the input frame size
 * @param colorMode the chosen processing mode: with or without color processing
 * @param colorSamplingMethod specifies which kind of color sampling will be used:
 * - cv::bioinspired::RETINA_COLOR_RANDOM: each pixel position is either R, G or B in a random choice
 * - cv::bioinspired::RETINA_COLOR_DIAGONAL: color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3 GBRGBRGBR...
 * - cv::bioinspired::RETINA_COLOR_BAYER: standard Bayer sampling
 *
 * useRetinaLogSampling, reductionFactor and samplingStrength keep their default values
 * (false, 1.0 and 10.0).
 */
+ (Retina*)create:(Size2i*)inputSize colorMode:(BOOL)colorMode colorSamplingMethod:(int)colorSamplingMethod NS_SWIFT_NAME(create(inputSize:colorMode:colorSamplingMethod:));
/**
 * Constructors from standardized interfaces: retrieve a smart pointer to a Retina instance
 *
 * @param inputSize the input frame size
 * @param colorMode the chosen processing mode: with or without color processing
 *
 * colorSamplingMethod, useRetinaLogSampling, reductionFactor and samplingStrength keep their
 * default values (RETINA_COLOR_BAYER, false, 1.0 and 10.0).
 */
+ (Retina*)create:(Size2i*)inputSize colorMode:(BOOL)colorMode NS_SWIFT_NAME(create(inputSize:colorMode:));
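/*
 End-to-end sketch combining only the calls declared in this header; `frames` stands for your own
 NSArray<Mat*> of equally sized input frames, and `[Mat new]` assumes the default Mat initializer
 from the companion Mat.h header.

     Retina *retina = [Retina create:[[Size2i alloc] initWithWidth:640 height:480] colorMode:YES];
     Mat *parvo = [Mat new];
     Mat *magno = [Mat new];
     for (Mat *frame in frames) {
         [retina run:frame];
         [retina getParvo:parvo];   // details / foveal channel, rescaled for 8-bit use
         [retina getMagno:magno];   // motion / peripheral channel, rescaled for 8-bit use
     }
*/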
@end
NS_ASSUME_NONNULL_END