// Dnn.h — OpenCV DNN module, auto-generated Objective-C binding header.
  1. //
  2. // This file is auto-generated. Please don't modify it!
  3. //
  4. #pragma once
  5. #ifdef __cplusplus
  6. //#import "opencv.hpp"
  7. #import "opencv2/dnn.hpp"
  8. #else
  9. #define CV_EXPORTS
  10. #endif
  11. #import <Foundation/Foundation.h>
  12. @class ByteVector;
  13. @class FloatVector;
  14. @class Image2BlobParams;
  15. @class IntVector;
  16. @class Mat;
  17. @class Net;
  18. @class Rect2d;
  19. @class Rect2i;
  20. @class RotatedRect;
  21. @class Scalar;
  22. @class Size2i;
  23. // C++: enum Backend (cv.dnn.Backend)
  24. typedef NS_ENUM(int, Backend) {
  25. DNN_BACKEND_DEFAULT = 0,
  26. DNN_BACKEND_HALIDE = 0+1,
  27. DNN_BACKEND_INFERENCE_ENGINE = 0+2,
  28. DNN_BACKEND_OPENCV = 0+3,
  29. DNN_BACKEND_VKCOM = 0+4,
  30. DNN_BACKEND_CUDA = 0+5,
  31. DNN_BACKEND_WEBNN = 0+6,
  32. DNN_BACKEND_TIMVX = 0+7,
  33. DNN_BACKEND_CANN = 0+8
  34. };
  35. // C++: enum DataLayout (cv.dnn.DataLayout)
  36. typedef NS_ENUM(int, DataLayout) {
  37. DNN_LAYOUT_UNKNOWN = 0,
  38. DNN_LAYOUT_ND = 1,
  39. DNN_LAYOUT_NCHW = 2,
  40. DNN_LAYOUT_NCDHW = 3,
  41. DNN_LAYOUT_NHWC = 4,
  42. DNN_LAYOUT_NDHWC = 5,
  43. DNN_LAYOUT_PLANAR = 6
  44. };
  45. // C++: enum ImagePaddingMode (cv.dnn.ImagePaddingMode)
  46. typedef NS_ENUM(int, ImagePaddingMode) {
  47. DNN_PMODE_NULL = 0,
  48. DNN_PMODE_CROP_CENTER = 1,
  49. DNN_PMODE_LETTERBOX = 2
  50. };
  51. // C++: enum SoftNMSMethod (cv.dnn.SoftNMSMethod)
  52. typedef NS_ENUM(int, SoftNMSMethod) {
  53. SoftNMSMethod_SOFTNMS_LINEAR NS_SWIFT_NAME(SOFTNMS_LINEAR) = 1,
  54. SoftNMSMethod_SOFTNMS_GAUSSIAN NS_SWIFT_NAME(SOFTNMS_GAUSSIAN) = 2
  55. };
  56. // C++: enum Target (cv.dnn.Target)
  57. typedef NS_ENUM(int, Target) {
  58. DNN_TARGET_CPU = 0,
  59. DNN_TARGET_OPENCL = 0+1,
  60. DNN_TARGET_OPENCL_FP16 = 0+2,
  61. DNN_TARGET_MYRIAD = 0+3,
  62. DNN_TARGET_VULKAN = 0+4,
  63. DNN_TARGET_FPGA = 0+5,
  64. DNN_TARGET_CUDA = 0+6,
  65. DNN_TARGET_CUDA_FP16 = 0+7,
  66. DNN_TARGET_HDDL = 0+8,
  67. DNN_TARGET_NPU = 0+9,
  68. DNN_TARGET_CPU_FP16 = 0+10
  69. };
  70. NS_ASSUME_NONNULL_BEGIN
  71. // C++: class Dnn
  72. /**
  73. * The Dnn module
  74. *
  75. * Member classes: `DictValue`, `Layer`, `Net`, `Image2BlobParams`, `Model`, `ClassificationModel`, `KeypointsModel`, `SegmentationModel`, `DetectionModel`, `TextRecognitionModel`, `TextDetectionModel`, `TextDetectionModel_EAST`, `TextDetectionModel_DB`
  76. *
  77. * Member enums: `Backend`, `Target`, `DataLayout`, `ImagePaddingMode`, `SoftNMSMethod`
  78. */
  79. CV_EXPORTS @interface Dnn : NSObject
  80. #pragma mark - Methods
  81. //
  82. // vector_Target cv::dnn::getAvailableTargets(dnn_Backend be)
  83. //
  84. // Return type 'vector_Target' is not supported, skipping the function
  85. //
  86. // Net cv::dnn::readNetFromDarknet(String cfgFile, String darknetModel = String())
  87. //
  88. /**
  89. * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
  90. * @param cfgFile path to the .cfg file with text description of the network architecture.
  91. * @param darknetModel path to the .weights file with learned network.
  92. * @return Network object that ready to do forward, throw an exception in failure cases.
  93. */
  94. + (Net*)readNetFromDarknetFile:(NSString*)cfgFile darknetModel:(NSString*)darknetModel NS_SWIFT_NAME(readNetFromDarknet(cfgFile:darknetModel:));
  95. /**
  96. * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
  97. * @param cfgFile path to the .cfg file with text description of the network architecture.
  98. * @return Network object that ready to do forward, throw an exception in failure cases.
  99. */
  100. + (Net*)readNetFromDarknetFile:(NSString*)cfgFile NS_SWIFT_NAME(readNetFromDarknet(cfgFile:));
  101. //
  102. // Net cv::dnn::readNetFromDarknet(vector_uchar bufferCfg, vector_uchar bufferModel = std::vector<uchar>())
  103. //
  104. /**
  105. * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
  106. * @param bufferCfg A buffer contains a content of .cfg file with text description of the network architecture.
  107. * @param bufferModel A buffer contains a content of .weights file with learned network.
  108. * @return Net object.
  109. */
  110. + (Net*)readNetFromDarknetBuffer:(ByteVector*)bufferCfg bufferModel:(ByteVector*)bufferModel NS_SWIFT_NAME(readNetFromDarknet(bufferCfg:bufferModel:));
  111. /**
  112. * Reads a network model stored in <a href="https://pjreddie.com/darknet/">Darknet</a> model files.
  113. * @param bufferCfg A buffer contains a content of .cfg file with text description of the network architecture.
  114. * @return Net object.
  115. */
  116. + (Net*)readNetFromDarknetBuffer:(ByteVector*)bufferCfg NS_SWIFT_NAME(readNetFromDarknet(bufferCfg:));
  117. //
  118. // Net cv::dnn::readNetFromCaffe(String prototxt, String caffeModel = String())
  119. //
  120. /**
  121. * Reads a network model stored in <a href="http://caffe.berkeleyvision.org">Caffe</a> framework's format.
  122. * @param prototxt path to the .prototxt file with text description of the network architecture.
  123. * @param caffeModel path to the .caffemodel file with learned network.
  124. * @return Net object.
  125. */
  126. + (Net*)readNetFromCaffeFile:(NSString*)prototxt caffeModel:(NSString*)caffeModel NS_SWIFT_NAME(readNetFromCaffe(prototxt:caffeModel:));
  127. /**
  128. * Reads a network model stored in <a href="http://caffe.berkeleyvision.org">Caffe</a> framework's format.
  129. * @param prototxt path to the .prototxt file with text description of the network architecture.
  130. * @return Net object.
  131. */
  132. + (Net*)readNetFromCaffeFile:(NSString*)prototxt NS_SWIFT_NAME(readNetFromCaffe(prototxt:));
  133. //
  134. // Net cv::dnn::readNetFromCaffe(vector_uchar bufferProto, vector_uchar bufferModel = std::vector<uchar>())
  135. //
  136. /**
  137. * Reads a network model stored in Caffe model in memory.
  138. * @param bufferProto buffer containing the content of the .prototxt file
  139. * @param bufferModel buffer containing the content of the .caffemodel file
  140. * @return Net object.
  141. */
  142. + (Net*)readNetFromCaffeBuffer:(ByteVector*)bufferProto bufferModel:(ByteVector*)bufferModel NS_SWIFT_NAME(readNetFromCaffe(bufferProto:bufferModel:));
  143. /**
  144. * Reads a network model stored in Caffe model in memory.
  145. * @param bufferProto buffer containing the content of the .prototxt file
  146. * @return Net object.
  147. */
  148. + (Net*)readNetFromCaffeBuffer:(ByteVector*)bufferProto NS_SWIFT_NAME(readNetFromCaffe(bufferProto:));
  149. //
  150. // Net cv::dnn::readNetFromTensorflow(String model, String config = String())
  151. //
  152. /**
  153. * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
  154. * @param model path to the .pb file with binary protobuf description of the network architecture
  155. * @param config path to the .pbtxt file that contains text graph definition in protobuf format.
  156. * Resulting Net object is built by text graph using weights from a binary one that
  157. * let us make it more flexible.
  158. * @return Net object.
  159. */
  160. + (Net*)readNetFromTensorflowFile:(NSString*)model config:(NSString*)config NS_SWIFT_NAME(readNetFromTensorflow(model:config:));
  161. /**
  162. * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
  163. * @param model path to the .pb file with binary protobuf description of the network architecture
  164. * Resulting Net object is built by text graph using weights from a binary one that
  165. * let us make it more flexible.
  166. * @return Net object.
  167. */
  168. + (Net*)readNetFromTensorflowFile:(NSString*)model NS_SWIFT_NAME(readNetFromTensorflow(model:));
  169. //
  170. // Net cv::dnn::readNetFromTensorflow(vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>())
  171. //
  172. /**
  173. * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
  174. * @param bufferModel buffer containing the content of the pb file
  175. * @param bufferConfig buffer containing the content of the pbtxt file
  176. * @return Net object.
  177. */
  178. + (Net*)readNetFromTensorflowBuffer:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig NS_SWIFT_NAME(readNetFromTensorflow(bufferModel:bufferConfig:));
  179. /**
  180. * Reads a network model stored in <a href="https://www.tensorflow.org/">TensorFlow</a> framework's format.
  181. * @param bufferModel buffer containing the content of the pb file
  182. * @return Net object.
  183. */
  184. + (Net*)readNetFromTensorflowBuffer:(ByteVector*)bufferModel NS_SWIFT_NAME(readNetFromTensorflow(bufferModel:));
  185. //
  186. // Net cv::dnn::readNetFromTFLite(String model)
  187. //
  188. /**
  189. * Reads a network model stored in <a href="https://www.tensorflow.org/lite">TFLite</a> framework's format.
  190. * @param model path to the .tflite file with binary flatbuffers description of the network architecture
  191. * @return Net object.
  192. */
  193. + (Net*)readNetFromTFLiteFile:(NSString*)model NS_SWIFT_NAME(readNetFromTFLite(model:));
  194. //
  195. // Net cv::dnn::readNetFromTFLite(vector_uchar bufferModel)
  196. //
  197. /**
  198. * Reads a network model stored in <a href="https://www.tensorflow.org/lite">TFLite</a> framework's format.
  199. * @param bufferModel buffer containing the content of the tflite file
  200. * @return Net object.
  201. */
  202. + (Net*)readNetFromTFLite:(ByteVector*)bufferModel NS_SWIFT_NAME(readNetFromTFLite(bufferModel:));
  203. //
  204. // Net cv::dnn::readNetFromTorch(String model, bool isBinary = true, bool evaluate = true)
  205. //
  206. /**
  207. * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
  208. * @param model path to the file, dumped from Torch by using torch.save() function.
  209. * @param isBinary specifies whether the network was serialized in ascii mode or binary.
  210. * @param evaluate specifies testing phase of network. If true, it's similar to evaluate() method in Torch.
  211. * @return Net object.
  212. *
  213. * NOTE: Ascii mode of Torch serializer is more preferable, because binary mode extensively use `long` type of C language,
  214. * which has various bit-length on different systems.
  215. *
  216. * The loading file must contain serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
  217. * with importing network. Try to eliminate a custom objects from serialazing data to avoid importing errors.
  218. *
  219. * List of supported layers (i.e. object instances derived from Torch nn.Module class):
  220. * - nn.Sequential
  221. * - nn.Parallel
  222. * - nn.Concat
  223. * - nn.Linear
  224. * - nn.SpatialConvolution
  225. * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
  226. * - nn.ReLU, nn.TanH, nn.Sigmoid
  227. * - nn.Reshape
  228. * - nn.SoftMax, nn.LogSoftMax
  229. *
  230. * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
  231. */
  232. + (Net*)readNetFromTorch:(NSString*)model isBinary:(BOOL)isBinary evaluate:(BOOL)evaluate NS_SWIFT_NAME(readNetFromTorch(model:isBinary:evaluate:));
  233. /**
  234. * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
  235. * @param model path to the file, dumped from Torch by using torch.save() function.
  236. * @param isBinary specifies whether the network was serialized in ascii mode or binary.
  237. * @return Net object.
  238. *
  239. * NOTE: Ascii mode of Torch serializer is more preferable, because binary mode extensively use `long` type of C language,
  240. * which has various bit-length on different systems.
  241. *
  242. * The loading file must contain serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
  243. * with importing network. Try to eliminate a custom objects from serialazing data to avoid importing errors.
  244. *
  245. * List of supported layers (i.e. object instances derived from Torch nn.Module class):
  246. * - nn.Sequential
  247. * - nn.Parallel
  248. * - nn.Concat
  249. * - nn.Linear
  250. * - nn.SpatialConvolution
  251. * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
  252. * - nn.ReLU, nn.TanH, nn.Sigmoid
  253. * - nn.Reshape
  254. * - nn.SoftMax, nn.LogSoftMax
  255. *
  256. * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
  257. */
  258. + (Net*)readNetFromTorch:(NSString*)model isBinary:(BOOL)isBinary NS_SWIFT_NAME(readNetFromTorch(model:isBinary:));
  259. /**
  260. * Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
  261. * @param model path to the file, dumped from Torch by using torch.save() function.
  262. * @return Net object.
  263. *
  264. * NOTE: Ascii mode of Torch serializer is more preferable, because binary mode extensively use `long` type of C language,
  265. * which has various bit-length on different systems.
  266. *
  267. * The loading file must contain serialized <a href="https://github.com/torch/nn/blob/master/doc/module.md">nn.Module</a> object
  268. * with importing network. Try to eliminate a custom objects from serialazing data to avoid importing errors.
  269. *
  270. * List of supported layers (i.e. object instances derived from Torch nn.Module class):
  271. * - nn.Sequential
  272. * - nn.Parallel
  273. * - nn.Concat
  274. * - nn.Linear
  275. * - nn.SpatialConvolution
  276. * - nn.SpatialMaxPooling, nn.SpatialAveragePooling
  277. * - nn.ReLU, nn.TanH, nn.Sigmoid
  278. * - nn.Reshape
  279. * - nn.SoftMax, nn.LogSoftMax
  280. *
  281. * Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
  282. */
  283. + (Net*)readNetFromTorch:(NSString*)model NS_SWIFT_NAME(readNetFromTorch(model:));
  284. //
  285. // Net cv::dnn::readNet(String model, String config = "", String framework = "")
  286. //
  287. /**
  288. * Read deep learning network represented in one of the supported formats.
  289. * @param model Binary file contains trained weights. The following file
  290. * extensions are expected for models from different frameworks:
  291. * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
  292. * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
  293. * * `*.t7` | `*.net` (Torch, http://torch.ch/)
  294. * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
  295. * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
  296. * * `*.onnx` (ONNX, https://onnx.ai/)
  297. * @param config Text file contains network configuration. It could be a
  298. * file with the following extensions:
  299. * * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
  300. * * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
  301. * * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
  302. * * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
  303. * @param framework Explicit framework name tag to determine a format.
  304. * @return Net object.
  305. *
  306. * This function automatically detects an origin framework of trained model
  307. * and calls an appropriate function such REF: readNetFromCaffe, REF: readNetFromTensorflow,
  308. * REF: readNetFromTorch or REF: readNetFromDarknet. An order of @p model and @p config
  309. * arguments does not matter.
  310. */
  311. + (Net*)readNet:(NSString*)model config:(NSString*)config framework:(NSString*)framework NS_SWIFT_NAME(readNet(model:config:framework:));
  312. /**
  313. * Read deep learning network represented in one of the supported formats.
  314. * @param model Binary file contains trained weights. The following file
  315. * extensions are expected for models from different frameworks:
  316. * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
  317. * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
  318. * * `*.t7` | `*.net` (Torch, http://torch.ch/)
  319. * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
  320. * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
  321. * * `*.onnx` (ONNX, https://onnx.ai/)
  322. * @param config Text file contains network configuration. It could be a
  323. * file with the following extensions:
  324. * * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
  325. * * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
  326. * * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
  327. * * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
  328. * @return Net object.
  329. *
  330. * This function automatically detects an origin framework of trained model
  331. * and calls an appropriate function such REF: readNetFromCaffe, REF: readNetFromTensorflow,
  332. * REF: readNetFromTorch or REF: readNetFromDarknet. An order of @p model and @p config
  333. * arguments does not matter.
  334. */
  335. + (Net*)readNet:(NSString*)model config:(NSString*)config NS_SWIFT_NAME(readNet(model:config:));
  336. /**
  337. * Read deep learning network represented in one of the supported formats.
  338. * @param model Binary file contains trained weights. The following file
  339. * extensions are expected for models from different frameworks:
  340. * * `*.caffemodel` (Caffe, http://caffe.berkeleyvision.org/)
  341. * * `*.pb` (TensorFlow, https://www.tensorflow.org/)
  342. * * `*.t7` | `*.net` (Torch, http://torch.ch/)
  343. * * `*.weights` (Darknet, https://pjreddie.com/darknet/)
  344. * * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
  345. * * `*.onnx` (ONNX, https://onnx.ai/)
  346. * file with the following extensions:
  347. * * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
  348. * * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
  349. * * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
  350. * * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
  351. * @return Net object.
  352. *
  353. * This function automatically detects an origin framework of trained model
  354. * and calls an appropriate function such REF: readNetFromCaffe, REF: readNetFromTensorflow,
  355. * REF: readNetFromTorch or REF: readNetFromDarknet. An order of @p model and @p config
  356. * arguments does not matter.
  357. */
  358. + (Net*)readNet:(NSString*)model NS_SWIFT_NAME(readNet(model:));
  359. //
  360. // Net cv::dnn::readNet(String framework, vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>())
  361. //
  362. /**
  363. * Read deep learning network represented in one of the supported formats.
  364. * This is an overloaded member function, provided for convenience.
  365. * It differs from the above function only in what argument(s) it accepts.
  366. * @param framework Name of origin framework.
  367. * @param bufferModel A buffer with a content of binary file with weights
  368. * @param bufferConfig A buffer with a content of text file contains network configuration.
  369. * @return Net object.
  370. */
  371. + (Net*)readNet:(NSString*)framework bufferModel:(ByteVector*)bufferModel bufferConfig:(ByteVector*)bufferConfig NS_SWIFT_NAME(readNet(framework:bufferModel:bufferConfig:));
  372. /**
  373. * Read deep learning network represented in one of the supported formats.
  374. * This is an overloaded member function, provided for convenience.
  375. * It differs from the above function only in what argument(s) it accepts.
  376. * @param framework Name of origin framework.
  377. * @param bufferModel A buffer with a content of binary file with weights
  378. * @return Net object.
  379. */
  380. + (Net*)readNet:(NSString*)framework bufferModel:(ByteVector*)bufferModel NS_SWIFT_NAME(readNet(framework:bufferModel:));
  381. //
  382. // Mat cv::dnn::readTorchBlob(String filename, bool isBinary = true)
  383. //
  384. /**
  385. * Loads blob which was serialized as torch.Tensor object of Torch7 framework.
  386. * @warning This function has the same limitations as readNetFromTorch().
  387. */
  388. + (Mat*)readTorchBlob:(NSString*)filename isBinary:(BOOL)isBinary NS_SWIFT_NAME(readTorchBlob(filename:isBinary:));
  389. /**
  390. * Loads blob which was serialized as torch.Tensor object of Torch7 framework.
  391. * @warning This function has the same limitations as readNetFromTorch().
  392. */
  393. + (Mat*)readTorchBlob:(NSString*)filename NS_SWIFT_NAME(readTorchBlob(filename:));
  394. //
  395. // Net cv::dnn::readNetFromModelOptimizer(String xml, String bin)
  396. //
  397. /**
  398. * Load a network from Intel's Model Optimizer intermediate representation.
  399. * @param xml XML configuration file with network's topology.
  400. * @param bin Binary file with trained weights.
  401. * @return Net object.
  402. * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
  403. * backend.
  404. */
  405. + (Net*)readNetFromModelOptimizer:(NSString*)xml bin:(NSString*)bin NS_SWIFT_NAME(readNetFromModelOptimizer(xml:bin:));
  406. //
  407. // Net cv::dnn::readNetFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights)
  408. //
  409. /**
  410. * Load a network from Intel's Model Optimizer intermediate representation.
  411. * @param bufferModelConfig Buffer contains XML configuration with network's topology.
  412. * @param bufferWeights Buffer contains binary data with trained weights.
  413. * @return Net object.
  414. * Networks imported from Intel's Model Optimizer are launched in Intel's Inference Engine
  415. * backend.
  416. */
  417. + (Net*)readNetFromModelOptimizer:(ByteVector*)bufferModelConfig bufferWeights:(ByteVector*)bufferWeights NS_SWIFT_NAME(readNetFromModelOptimizer(bufferModelConfig:bufferWeights:));
  418. //
  419. // Net cv::dnn::readNetFromONNX(String onnxFile)
  420. //
  421. /**
  422. * Reads a network model <a href="https://onnx.ai/">ONNX</a>.
  423. * @param onnxFile path to the .onnx file with text description of the network architecture.
  424. * @return Network object that ready to do forward, throw an exception in failure cases.
  425. */
  426. + (Net*)readNetFromONNXFile:(NSString*)onnxFile NS_SWIFT_NAME(readNetFromONNX(onnxFile:));
  427. //
  428. // Net cv::dnn::readNetFromONNX(vector_uchar buffer)
  429. //
  430. /**
  431. * Reads a network model from <a href="https://onnx.ai/">ONNX</a>
  432. * in-memory buffer.
  433. * @param buffer in-memory buffer that stores the ONNX model bytes.
  434. * @return Network object that ready to do forward, throw an exception
  435. * in failure cases.
  436. */
  437. + (Net*)readNetFromONNXBuffer:(ByteVector*)buffer NS_SWIFT_NAME(readNetFromONNX(buffer:));
  438. //
  439. // Mat cv::dnn::readTensorFromONNX(String path)
  440. //
  441. /**
  442. * Creates blob from .pb file.
  443. * @param path to the .pb file with input tensor.
  444. * @return Mat.
  445. */
  446. + (Mat*)readTensorFromONNX:(NSString*)path NS_SWIFT_NAME(readTensorFromONNX(path:));
  447. //
  448. // Mat cv::dnn::blobFromImage(Mat image, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F)
  449. //
  450. /**
  451. * Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
  452. * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
  453. * @param image input image (with 1-, 3- or 4-channels).
  454. * @param scalefactor multiplier for @p images values.
  455. * @param size spatial size for output image
  456. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  457. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  458. * @param swapRB flag which indicates that swap first and last channels
  459. * in 3-channel image is necessary.
  460. * @param crop flag which indicates whether image will be cropped after resize or not
  461. * @param ddepth Depth of output blob. Choose CV_32F or CV_8U.
  462. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  463. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  464. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  465. * @return 4-dimensional Mat with NCHW dimensions order.
  466. *
  467. * NOTE:
  468. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  469. */
  470. + (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop ddepth:(int)ddepth NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:swapRB:crop:ddepth:));
  471. /**
  472. * Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
  473. * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
  474. * @param image input image (with 1-, 3- or 4-channels).
  475. * @param scalefactor multiplier for @p images values.
  476. * @param size spatial size for output image
  477. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  478. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  479. * @param swapRB flag which indicates that swap first and last channels
  480. * in 3-channel image is necessary.
  481. * @param crop flag which indicates whether image will be cropped after resize or not
  482. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  483. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  484. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  485. * @return 4-dimensional Mat with NCHW dimensions order.
  486. *
  487. * NOTE:
  488. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  489. */
  490. + (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:swapRB:crop:));
  491. /**
  492. * Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
  493. * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
  494. * @param image input image (with 1-, 3- or 4-channels).
  495. * @param scalefactor multiplier for @p images values.
  496. * @param size spatial size for output image
  497. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  498. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  499. * @param swapRB flag which indicates that swap first and last channels
  500. * in 3-channel image is necessary.
  501. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  502. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  503. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  504. * @return 4-dimensional Mat with NCHW dimensions order.
  505. *
  506. * NOTE:
  507. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  508. */
  509. + (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:swapRB:));
  510. /**
  511. * Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
  512. * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
  513. * @param image input image (with 1-, 3- or 4-channels).
  514. * @param scalefactor multiplier for @p images values.
  515. * @param size spatial size for output image
  516. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  517. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  518. * in 3-channel image is necessary.
  519. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  520. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  521. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  522. * @return 4-dimensional Mat with NCHW dimensions order.
  523. *
  524. * NOTE:
  525. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  526. */
  527. + (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:mean:));
  528. /**
  529. * Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
  530. * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
  531. * @param image input image (with 1-, 3- or 4-channels).
  532. * @param scalefactor multiplier for @p images values.
  533. * @param size spatial size for output image
  534. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  535. * in 3-channel image is necessary.
  536. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  537. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  538. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  539. * @return 4-dimensional Mat with NCHW dimensions order.
  540. *
  541. * NOTE:
  542. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  543. */
  544. + (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor size:(Size2i*)size NS_SWIFT_NAME(blobFromImage(image:scalefactor:size:));
  545. /**
  546. * Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
  547. * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
  548. * @param image input image (with 1-, 3- or 4-channels).
  549. * @param scalefactor multiplier for @p images values.
  550. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  551. * in 3-channel image is necessary.
  552. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  553. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  554. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  555. * @return 4-dimensional Mat with NCHW dimensions order.
  556. *
  557. * NOTE:
  558. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  559. */
  560. + (Mat*)blobFromImage:(Mat*)image scalefactor:(double)scalefactor NS_SWIFT_NAME(blobFromImage(image:scalefactor:));
  561. /**
  562. * Creates 4-dimensional blob from image. Optionally resizes and crops @p image from center,
  563. * subtract @p mean values, scales values by @p scalefactor, swap Blue and Red channels.
  564. * @param image input image (with 1-, 3- or 4-channels).
  565. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  566. * in 3-channel image is necessary.
  567. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  568. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  569. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  570. * @return 4-dimensional Mat with NCHW dimensions order.
  571. *
  572. * NOTE:
  573. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  574. */
  575. + (Mat*)blobFromImage:(Mat*)image NS_SWIFT_NAME(blobFromImage(image:));
  576. //
  577. // Mat cv::dnn::blobFromImages(vector_Mat images, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F)
  578. //
  579. /**
  580. * Creates 4-dimensional blob from series of images. Optionally resizes and
  581. * crops @p images from center, subtract @p mean values, scales values by @p scalefactor,
  582. * swap Blue and Red channels.
  583. * @param images input images (all with 1-, 3- or 4-channels).
  584. * @param size spatial size for output image
  585. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  586. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  587. * @param scalefactor multiplier for @p images values.
  588. * @param swapRB flag which indicates that swap first and last channels
  589. * in 3-channel image is necessary.
  590. * @param crop flag which indicates whether image will be cropped after resize or not
  591. * @param ddepth Depth of output blob. Choose CV_32F or CV_8U.
  592. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  593. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  594. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  595. * @return 4-dimensional Mat with NCHW dimensions order.
  596. *
  597. * NOTE:
  598. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  599. */
  600. + (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop ddepth:(int)ddepth NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:swapRB:crop:ddepth:));
  601. /**
  602. * Creates 4-dimensional blob from series of images. Optionally resizes and
  603. * crops @p images from center, subtract @p mean values, scales values by @p scalefactor,
  604. * swap Blue and Red channels.
  605. * @param images input images (all with 1-, 3- or 4-channels).
  606. * @param size spatial size for output image
  607. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  608. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  609. * @param scalefactor multiplier for @p images values.
  610. * @param swapRB flag which indicates that swap first and last channels
  611. * in 3-channel image is necessary.
  612. * @param crop flag which indicates whether image will be cropped after resize or not
  613. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  614. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  615. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  616. * @return 4-dimensional Mat with NCHW dimensions order.
  617. *
  618. * NOTE:
  619. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  620. */
  621. + (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB crop:(BOOL)crop NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:swapRB:crop:));
  622. /**
  623. * Creates 4-dimensional blob from series of images. Optionally resizes and
  624. * crops @p images from center, subtract @p mean values, scales values by @p scalefactor,
  625. * swap Blue and Red channels.
  626. * @param images input images (all with 1-, 3- or 4-channels).
  627. * @param size spatial size for output image
  628. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  629. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  630. * @param scalefactor multiplier for @p images values.
  631. * @param swapRB flag which indicates that swap first and last channels
  632. * in 3-channel image is necessary.
  633. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  634. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  635. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  636. * @return 4-dimensional Mat with NCHW dimensions order.
  637. *
  638. * NOTE:
  639. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  640. */
  641. + (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean swapRB:(BOOL)swapRB NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:swapRB:));
  642. /**
  643. * Creates 4-dimensional blob from series of images. Optionally resizes and
  644. * crops @p images from center, subtract @p mean values, scales values by @p scalefactor,
  645. * swap Blue and Red channels.
  646. * @param images input images (all with 1-, 3- or 4-channels).
  647. * @param size spatial size for output image
  648. * @param mean scalar with mean values which are subtracted from channels. Values are intended
  649. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  650. * @param scalefactor multiplier for @p images values.
  651. * in 3-channel image is necessary.
  652. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  653. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  654. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  655. * @return 4-dimensional Mat with NCHW dimensions order.
  656. *
  657. * NOTE:
  658. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  659. */
  660. + (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size mean:(Scalar*)mean NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:mean:));
  661. /**
  662. * Creates 4-dimensional blob from series of images. Optionally resizes and
  663. * crops @p images from center, subtract @p mean values, scales values by @p scalefactor,
  664. * swap Blue and Red channels.
  665. * @param images input images (all with 1-, 3- or 4-channels).
  666. * @param size spatial size for output image
  667. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  668. * @param scalefactor multiplier for @p images values.
  669. * in 3-channel image is necessary.
  670. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  671. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  672. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  673. * @return 4-dimensional Mat with NCHW dimensions order.
  674. *
  675. * NOTE:
  676. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  677. */
  678. + (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor size:(Size2i*)size NS_SWIFT_NAME(blobFromImages(images:scalefactor:size:));
  679. /**
  680. * Creates 4-dimensional blob from series of images. Optionally resizes and
  681. * crops @p images from center, subtract @p mean values, scales values by @p scalefactor,
  682. * swap Blue and Red channels.
  683. * @param images input images (all with 1-, 3- or 4-channels).
  684. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  685. * @param scalefactor multiplier for @p images values.
  686. * in 3-channel image is necessary.
  687. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  688. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  689. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  690. * @return 4-dimensional Mat with NCHW dimensions order.
  691. *
  692. * NOTE:
  693. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  694. */
  695. + (Mat*)blobFromImages:(NSArray<Mat*>*)images scalefactor:(double)scalefactor NS_SWIFT_NAME(blobFromImages(images:scalefactor:));
  696. /**
  697. * Creates 4-dimensional blob from series of images. Optionally resizes and
  698. * crops @p images from center, subtract @p mean values, scales values by @p scalefactor,
  699. * swap Blue and Red channels.
  700. * @param images input images (all with 1-, 3- or 4-channels).
  701. * to be in (mean-R, mean-G, mean-B) order if @p image has BGR ordering and @p swapRB is true.
  702. * in 3-channel image is necessary.
  703. * if @p crop is true, input image is resized so one side after resize is equal to corresponding
  704. * dimension in @p size and another one is equal or larger. Then, crop from the center is performed.
  705. * If @p crop is false, direct resize without cropping and preserving aspect ratio is performed.
  706. * @return 4-dimensional Mat with NCHW dimensions order.
  707. *
  708. * NOTE:
  709. * The order and usage of `scalefactor` and `mean` are (input - mean) * scalefactor.
  710. */
  711. + (Mat*)blobFromImages:(NSArray<Mat*>*)images NS_SWIFT_NAME(blobFromImages(images:));
  712. //
  713. // Mat cv::dnn::blobFromImageWithParams(Mat image, Image2BlobParams param = Image2BlobParams())
  714. //
  715. /**
  716. * Creates 4-dimensional blob from image with given params.
  717. *
  718. * This function is an extension of REF: blobFromImage to meet more image preprocess needs.
  719. * Given input image and preprocessing parameters, and function outputs the blob.
  720. *
  721. * @param image input image (all with 1-, 3- or 4-channels).
  722. * @param param struct of Image2BlobParams, contains all parameters needed by processing of image to blob.
  723. * @return 4-dimensional Mat.
  724. */
  725. + (Mat*)blobFromImageWithParams:(Mat*)image param:(Image2BlobParams*)param NS_SWIFT_NAME(blobFromImageWithParams(image:param:));
  726. /**
  727. * Creates 4-dimensional blob from image with given params.
  728. *
  729. * This function is an extension of REF: blobFromImage to meet more image preprocess needs.
  730. * Given input image and preprocessing parameters, and function outputs the blob.
  731. *
  732. * @param image input image (all with 1-, 3- or 4-channels).
  733. * @return 4-dimensional Mat.
  734. */
  735. + (Mat*)blobFromImageWithParams:(Mat*)image NS_SWIFT_NAME(blobFromImageWithParams(image:));
  736. //
  737. // void cv::dnn::blobFromImageWithParams(Mat image, Mat& blob, Image2BlobParams param = Image2BlobParams())
  738. //
  739. + (void)blobFromImageWithParams:(Mat*)image blob:(Mat*)blob param:(Image2BlobParams*)param NS_SWIFT_NAME(blobFromImageWithParams(image:blob:param:));
  740. + (void)blobFromImageWithParams:(Mat*)image blob:(Mat*)blob NS_SWIFT_NAME(blobFromImageWithParams(image:blob:));
  741. //
  742. // Mat cv::dnn::blobFromImagesWithParams(vector_Mat images, Image2BlobParams param = Image2BlobParams())
  743. //
  744. /**
  745. * Creates 4-dimensional blob from series of images with given params.
  746. *
  747. * This function is an extension of REF: blobFromImages to meet more image preprocess needs.
  748. * Given input image and preprocessing parameters, and function outputs the blob.
  749. *
  750. * @param images input image (all with 1-, 3- or 4-channels).
  751. * @param param struct of Image2BlobParams, contains all parameters needed by processing of image to blob.
  752. * @return 4-dimensional Mat.
  753. */
  754. + (Mat*)blobFromImagesWithParams:(NSArray<Mat*>*)images param:(Image2BlobParams*)param NS_SWIFT_NAME(blobFromImagesWithParams(images:param:));
  755. /**
  756. * Creates 4-dimensional blob from series of images with given params.
  757. *
  758. * This function is an extension of REF: blobFromImages to meet more image preprocess needs.
  759. * Given input image and preprocessing parameters, and function outputs the blob.
  760. *
  761. * @param images input image (all with 1-, 3- or 4-channels).
  762. * @return 4-dimensional Mat.
  763. */
  764. + (Mat*)blobFromImagesWithParams:(NSArray<Mat*>*)images NS_SWIFT_NAME(blobFromImagesWithParams(images:));
  765. //
  766. // void cv::dnn::blobFromImagesWithParams(vector_Mat images, Mat& blob, Image2BlobParams param = Image2BlobParams())
  767. //
  768. + (void)blobFromImagesWithParams:(NSArray<Mat*>*)images blob:(Mat*)blob param:(Image2BlobParams*)param NS_SWIFT_NAME(blobFromImagesWithParams(images:blob:param:));
  769. + (void)blobFromImagesWithParams:(NSArray<Mat*>*)images blob:(Mat*)blob NS_SWIFT_NAME(blobFromImagesWithParams(images:blob:));
  770. //
  771. // void cv::dnn::imagesFromBlob(Mat blob_, vector_Mat& images_)
  772. //
  773. /**
  774. * Parse a 4D blob and output the images it contains as 2D arrays through a simpler data structure
  775. * (std::vector<cv::Mat>).
  776. * @param blob_ 4 dimensional array (images, channels, height, width) in floating point precision (CV_32F) from
  777. * which you would like to extract the images.
  778. * @param images_ array of 2D Mat containing the images extracted from the blob in floating point precision
  779. * (CV_32F). They are non normalized neither mean added. The number of returned images equals the first dimension
  780. * of the blob (batch size). Every image has a number of channels equals to the second dimension of the blob (depth).
  781. */
  782. + (void)imagesFromBlob:(Mat*)blob_ images_:(NSMutableArray<Mat*>*)images_ NS_SWIFT_NAME(imagesFromBlob(blob_:images_:));
  783. //
  784. // void cv::dnn::shrinkCaffeModel(String src, String dst, vector_String layersTypes = std::vector<String>())
  785. //
  786. /**
  787. * Convert all weights of Caffe network to half precision floating point.
  788. * @param src Path to origin model from Caffe framework contains single
  789. * precision floating point weights (usually has `.caffemodel` extension).
  790. * @param dst Path to destination model with updated weights.
  791. * @param layersTypes Set of layers types which parameters will be converted.
  792. * By default, converts only Convolutional and Fully-Connected layers'
  793. * weights.
  794. *
  795. * NOTE: Shrinked model has no origin float32 weights so it can't be used
  796. * in origin Caffe framework anymore. However the structure of data
  797. * is taken from NVidia's Caffe fork: https://github.com/NVIDIA/caffe.
  798. * So the resulting model may be used there.
  799. */
  800. + (void)shrinkCaffeModel:(NSString*)src dst:(NSString*)dst layersTypes:(NSArray<NSString*>*)layersTypes NS_SWIFT_NAME(shrinkCaffeModel(src:dst:layersTypes:));
  801. /**
  802. * Convert all weights of Caffe network to half precision floating point.
  803. * @param src Path to origin model from Caffe framework contains single
  804. * precision floating point weights (usually has `.caffemodel` extension).
  805. * @param dst Path to destination model with updated weights.
  806. * By default, converts only Convolutional and Fully-Connected layers'
  807. * weights.
  808. *
  809. * NOTE: Shrinked model has no origin float32 weights so it can't be used
  810. * in origin Caffe framework anymore. However the structure of data
  811. * is taken from NVidia's Caffe fork: https://github.com/NVIDIA/caffe.
  812. * So the resulting model may be used there.
  813. */
  814. + (void)shrinkCaffeModel:(NSString*)src dst:(NSString*)dst NS_SWIFT_NAME(shrinkCaffeModel(src:dst:));
  815. //
  816. // void cv::dnn::writeTextGraph(String model, String output)
  817. //
  818. /**
  819. * Create a text representation for a binary network stored in protocol buffer format.
  820. * @param model A path to binary network.
  821. * @param output A path to output text file to be created.
  822. *
  823. * NOTE: To reduce output file size, trained weights are not included.
  824. */
  825. + (void)writeTextGraph:(NSString*)model output:(NSString*)output NS_SWIFT_NAME(writeTextGraph(model:output:));
  826. //
  827. // void cv::dnn::NMSBoxes(vector_Rect2d bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0)
  828. //
  829. /**
  830. * Performs non maximum suppression given boxes and corresponding scores.
  831. *
  832. * @param bboxes a set of bounding boxes to apply NMS.
  833. * @param scores a set of corresponding confidences.
  834. * @param score_threshold a threshold used to filter boxes by score.
  835. * @param nms_threshold a threshold used in non maximum suppression.
  836. * @param indices the kept indices of bboxes after NMS.
  837. * @param eta a coefficient in adaptive threshold formula: `$$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i$$`.
  838. * @param top_k if `>0`, keep at most @p top_k picked indices.
  839. */
  840. + (void)NMSBoxes:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta top_k:(int)top_k NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:top_k:));
  841. /**
  842. * Performs non maximum suppression given boxes and corresponding scores.
  843. *
  844. * @param bboxes a set of bounding boxes to apply NMS.
  845. * @param scores a set of corresponding confidences.
  846. * @param score_threshold a threshold used to filter boxes by score.
  847. * @param nms_threshold a threshold used in non maximum suppression.
  848. * @param indices the kept indices of bboxes after NMS.
  849. * @param eta a coefficient in adaptive threshold formula: `$$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i$$`.
  850. */
  851. + (void)NMSBoxes:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:));
  852. /**
  853. * Performs non maximum suppression given boxes and corresponding scores.
  854. *
  855. * @param bboxes a set of bounding boxes to apply NMS.
  856. * @param scores a set of corresponding confidences.
  857. * @param score_threshold a threshold used to filter boxes by score.
  858. * @param nms_threshold a threshold used in non maximum suppression.
  859. * @param indices the kept indices of bboxes after NMS.
  860. */
  861. + (void)NMSBoxes:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:));
  862. //
  863. // void cv::dnn::NMSBoxes(vector_RotatedRect bboxes, vector_float scores, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0)
  864. //
  865. + (void)NMSBoxesRotated:(NSArray<RotatedRect*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta top_k:(int)top_k NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:top_k:));
  866. + (void)NMSBoxesRotated:(NSArray<RotatedRect*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:eta:));
  867. + (void)NMSBoxesRotated:(NSArray<RotatedRect*>*)bboxes scores:(FloatVector*)scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices NS_SWIFT_NAME(NMSBoxes(bboxes:scores:score_threshold:nms_threshold:indices:));
  868. //
  869. // void cv::dnn::NMSBoxesBatched(vector_Rect2d bboxes, vector_float scores, vector_int class_ids, float score_threshold, float nms_threshold, vector_int& indices, float eta = 1.f, int top_k = 0)
  870. //
  871. /**
  872. * Performs batched non maximum suppression on given boxes and corresponding scores across different classes.
  873. *
  874. * @param bboxes a set of bounding boxes to apply NMS.
  875. * @param scores a set of corresponding confidences.
  876. * @param class_ids a set of corresponding class ids. Ids are integer and usually start from 0.
  877. * @param score_threshold a threshold used to filter boxes by score.
  878. * @param nms_threshold a threshold used in non maximum suppression.
  879. * @param indices the kept indices of bboxes after NMS.
  880. * @param eta a coefficient in adaptive threshold formula: `$$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i$$`.
  881. * @param top_k if `>0`, keep at most @p top_k picked indices.
  882. */
  883. + (void)NMSBoxesBatched:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores class_ids:(IntVector*)class_ids score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta top_k:(int)top_k NS_SWIFT_NAME(NMSBoxesBatched(bboxes:scores:class_ids:score_threshold:nms_threshold:indices:eta:top_k:));
  884. /**
  885. * Performs batched non maximum suppression on given boxes and corresponding scores across different classes.
  886. *
  887. * @param bboxes a set of bounding boxes to apply NMS.
  888. * @param scores a set of corresponding confidences.
  889. * @param class_ids a set of corresponding class ids. Ids are integer and usually start from 0.
  890. * @param score_threshold a threshold used to filter boxes by score.
  891. * @param nms_threshold a threshold used in non maximum suppression.
  892. * @param indices the kept indices of bboxes after NMS.
  893. * @param eta a coefficient in adaptive threshold formula: `$$nms\_threshold_{i+1}=eta\cdot nms\_threshold_i$$`.
  894. */
  895. + (void)NMSBoxesBatched:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores class_ids:(IntVector*)class_ids score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices eta:(float)eta NS_SWIFT_NAME(NMSBoxesBatched(bboxes:scores:class_ids:score_threshold:nms_threshold:indices:eta:));
  896. /**
  897. * Performs batched non maximum suppression on given boxes and corresponding scores across different classes.
  898. *
  899. * @param bboxes a set of bounding boxes to apply NMS.
  900. * @param scores a set of corresponding confidences.
  901. * @param class_ids a set of corresponding class ids. Ids are integer and usually start from 0.
  902. * @param score_threshold a threshold used to filter boxes by score.
  903. * @param nms_threshold a threshold used in non maximum suppression.
  904. * @param indices the kept indices of bboxes after NMS.
  905. */
  906. + (void)NMSBoxesBatched:(NSArray<Rect2d*>*)bboxes scores:(FloatVector*)scores class_ids:(IntVector*)class_ids score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices NS_SWIFT_NAME(NMSBoxesBatched(bboxes:scores:class_ids:score_threshold:nms_threshold:indices:));
  907. //
  908. // void cv::dnn::softNMSBoxes(vector_Rect bboxes, vector_float scores, vector_float& updated_scores, float score_threshold, float nms_threshold, vector_int& indices, size_t top_k = 0, float sigma = 0.5, SoftNMSMethod method = SoftNMSMethod::SOFTNMS_GAUSSIAN)
  909. //
  910. /**
  911. * Performs soft non maximum suppression given boxes and corresponding scores.
  912. * Reference: https://arxiv.org/abs/1704.04503
  913. * @param bboxes a set of bounding boxes to apply Soft NMS.
  914. * @param scores a set of corresponding confidences.
  915. * @param updated_scores a set of corresponding updated confidences.
  916. * @param score_threshold a threshold used to filter boxes by score.
  917. * @param nms_threshold a threshold used in non maximum suppression.
  918. * @param indices the kept indices of bboxes after NMS.
  919. * @param top_k keep at most @p top_k picked indices.
  920. * @param sigma parameter of Gaussian weighting.
  921. * @param method Gaussian or linear.
  922. * @see `SoftNMSMethod`
  923. */
  924. + (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices top_k:(size_t)top_k sigma:(float)sigma method:(SoftNMSMethod)method NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:top_k:sigma:method:));
  925. /**
  926. * Performs soft non maximum suppression given boxes and corresponding scores.
  927. * Reference: https://arxiv.org/abs/1704.04503
  928. * @param bboxes a set of bounding boxes to apply Soft NMS.
  929. * @param scores a set of corresponding confidences.
  930. * @param updated_scores a set of corresponding updated confidences.
  931. * @param score_threshold a threshold used to filter boxes by score.
  932. * @param nms_threshold a threshold used in non maximum suppression.
  933. * @param indices the kept indices of bboxes after NMS.
  934. * @param top_k keep at most @p top_k picked indices.
  935. * @param sigma parameter of Gaussian weighting.
  936. * @see `SoftNMSMethod`
  937. */
  938. + (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices top_k:(size_t)top_k sigma:(float)sigma NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:top_k:sigma:));
  939. /**
  940. * Performs soft non maximum suppression given boxes and corresponding scores.
  941. * Reference: https://arxiv.org/abs/1704.04503
  942. * @param bboxes a set of bounding boxes to apply Soft NMS.
  943. * @param scores a set of corresponding confidences.
  944. * @param updated_scores a set of corresponding updated confidences.
  945. * @param score_threshold a threshold used to filter boxes by score.
  946. * @param nms_threshold a threshold used in non maximum suppression.
  947. * @param indices the kept indices of bboxes after NMS.
  948. * @param top_k keep at most @p top_k picked indices.
  949. * @see `SoftNMSMethod`
  950. */
  951. + (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices top_k:(size_t)top_k NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:top_k:));
  952. /**
  953. * Performs soft non maximum suppression given boxes and corresponding scores.
  954. * Reference: https://arxiv.org/abs/1704.04503
  955. * @param bboxes a set of bounding boxes to apply Soft NMS.
  956. * @param scores a set of corresponding confidences.
  957. * @param updated_scores a set of corresponding updated confidences.
  958. * @param score_threshold a threshold used to filter boxes by score.
  959. * @param nms_threshold a threshold used in non maximum suppression.
  960. * @param indices the kept indices of bboxes after NMS.
  961. * @see `SoftNMSMethod`
  962. */
  963. + (void)softNMSBoxes:(NSArray<Rect2i*>*)bboxes scores:(FloatVector*)scores updated_scores:(FloatVector*)updated_scores score_threshold:(float)score_threshold nms_threshold:(float)nms_threshold indices:(IntVector*)indices NS_SWIFT_NAME(softNMSBoxes(bboxes:scores:updated_scores:score_threshold:nms_threshold:indices:));
  964. //
  965. // String cv::dnn::getInferenceEngineBackendType()
  966. //
  967. /**
  968. * Returns Inference Engine internal backend API.
  969. *
  970. * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
  971. *
  972. * `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable) is ignored since 4.6.0.
  973. *
  974. * @deprecated
  975. */
  976. + (NSString*)getInferenceEngineBackendType NS_SWIFT_NAME(getInferenceEngineBackendType()) DEPRECATED_ATTRIBUTE;
  977. //
  978. // String cv::dnn::setInferenceEngineBackendType(String newBackendType)
  979. //
  980. /**
  981. * Specify Inference Engine internal backend API.
  982. *
  983. * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
  984. *
  985. * @return previous value of internal backend API
  986. *
  987. * @deprecated
  988. */
  989. + (NSString*)setInferenceEngineBackendType:(NSString*)newBackendType NS_SWIFT_NAME(setInferenceEngineBackendType(newBackendType:)) DEPRECATED_ATTRIBUTE;
  990. //
  991. // void cv::dnn::resetMyriadDevice()
  992. //
  993. /**
  994. * Release a Myriad device (binded by OpenCV).
  995. *
  996. * Single Myriad device cannot be shared across multiple processes which uses
  997. * Inference Engine's Myriad plugin.
  998. */
  999. + (void)resetMyriadDevice NS_SWIFT_NAME(resetMyriadDevice());
  1000. //
  1001. // String cv::dnn::getInferenceEngineVPUType()
  1002. //
  1003. /**
  1004. * Returns Inference Engine VPU type.
  1005. *
  1006. * See values of `CV_DNN_INFERENCE_ENGINE_VPU_TYPE_*` macros.
  1007. */
  1008. + (NSString*)getInferenceEngineVPUType NS_SWIFT_NAME(getInferenceEngineVPUType());
  1009. //
  1010. // String cv::dnn::getInferenceEngineCPUType()
  1011. //
  1012. /**
  1013. * Returns Inference Engine CPU type.
  1014. *
  1015. * Specify OpenVINO plugin: CPU or ARM.
  1016. */
  1017. + (NSString*)getInferenceEngineCPUType NS_SWIFT_NAME(getInferenceEngineCPUType());
  1018. //
  1019. // void cv::dnn::releaseHDDLPlugin()
  1020. //
  1021. /**
  1022. * Release a HDDL plugin.
  1023. */
  1024. + (void)releaseHDDLPlugin NS_SWIFT_NAME(releaseHDDLPlugin());
  1025. @end
  1026. NS_ASSUME_NONNULL_END