EM.cs

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.UtilsModule;
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;

namespace OpenCVForUnity.MlModule
{

    // C++: class EM
    /**
     * The class implements the Expectation Maximization algorithm.
     *
     * SEE: REF: ml_intro_em
     */
    public class EM : StatModel
    {

        protected override void Dispose(bool disposing)
        {
            try
            {
                if (disposing)
                {
                }
                if (IsEnabledDispose)
                {
                    if (nativeObj != IntPtr.Zero)
                        ml_EM_delete(nativeObj);
                    nativeObj = IntPtr.Zero;
                }
            }
            finally
            {
                base.Dispose(disposing);
            }
        }

        protected internal EM(IntPtr addr) : base(addr) { }

        // internal usage only
        public static new EM __fromPtr__(IntPtr addr) { return new EM(addr); }

        // C++: enum <unnamed>
        public const int DEFAULT_NCLUSTERS = 5;
        public const int DEFAULT_MAX_ITERS = 100;
        public const int START_E_STEP = 1;
        public const int START_M_STEP = 2;
        public const int START_AUTO_STEP = 0;

        // C++: enum cv.ml.EM.Types
        public const int COV_MAT_SPHERICAL = 0;
        public const int COV_MAT_DIAGONAL = 1;
        public const int COV_MAT_GENERIC = 2;
        public const int COV_MAT_DEFAULT = COV_MAT_DIAGONAL;

        //
        // C++: int cv::ml::EM::getClustersNumber()
        //

        /**
         * SEE: setClustersNumber
         * return automatically generated
         */
        public int getClustersNumber()
        {
            ThrowIfDisposed();
            return ml_EM_getClustersNumber_10(nativeObj);
        }

        //
        // C++: void cv::ml::EM::setClustersNumber(int val)
        //

        /**
         * SEE: getClustersNumber
         * param val automatically generated
         */
        public void setClustersNumber(int val)
        {
            ThrowIfDisposed();
            ml_EM_setClustersNumber_10(nativeObj, val);
        }

        //
        // C++: int cv::ml::EM::getCovarianceMatrixType()
        //

        /**
         * SEE: setCovarianceMatrixType
         * return automatically generated
         */
        public int getCovarianceMatrixType()
        {
            ThrowIfDisposed();
            return ml_EM_getCovarianceMatrixType_10(nativeObj);
        }

        //
        // C++: void cv::ml::EM::setCovarianceMatrixType(int val)
        //

        /**
         * SEE: getCovarianceMatrixType
         * param val automatically generated
         */
        public void setCovarianceMatrixType(int val)
        {
            ThrowIfDisposed();
            ml_EM_setCovarianceMatrixType_10(nativeObj, val);
        }

        //
        // C++: TermCriteria cv::ml::EM::getTermCriteria()
        //

        /**
         * SEE: setTermCriteria
         * return automatically generated
         */
        public TermCriteria getTermCriteria()
        {
            ThrowIfDisposed();
            double[] tmpArray = new double[3];
            ml_EM_getTermCriteria_10(nativeObj, tmpArray);
            TermCriteria retVal = new TermCriteria(tmpArray);
            return retVal;
        }

        //
        // C++: void cv::ml::EM::setTermCriteria(TermCriteria val)
        //

        /**
         * SEE: getTermCriteria
         * param val automatically generated
         */
        public void setTermCriteria(TermCriteria val)
        {
            ThrowIfDisposed();
            ml_EM_setTermCriteria_10(nativeObj, val.type, val.maxCount, val.epsilon);
        }
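
        // Usage sketch (illustrative only, not part of the generated binding):
        // bounding EM training by iteration count and convergence threshold. The
        // concrete values below are assumptions, not library defaults.
        //
        //   EM em = EM.create();
        //   em.setTermCriteria(new TermCriteria(
        //       TermCriteria.COUNT + TermCriteria.EPS, // stop on either criterion
        //       EM.DEFAULT_MAX_ITERS,                  // at most 100 iterations
        //       1e-6));                                // log-likelihood change threshold
        //   TermCriteria tc = em.getTermCriteria();    // read the setting back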
        //
        // C++: Mat cv::ml::EM::getWeights()
        //

        /**
         * Returns weights of the mixtures
         *
         * Returns a vector with the number of elements equal to the number of mixtures.
         * return automatically generated
         */
        public Mat getWeights()
        {
            ThrowIfDisposed();
            return new Mat(DisposableObject.ThrowIfNullIntPtr(ml_EM_getWeights_10(nativeObj)));
        }

        //
        // C++: Mat cv::ml::EM::getMeans()
        //

        /**
         * Returns the cluster centers (means of the Gaussian mixture)
         *
         * Returns a matrix with the number of rows equal to the number of mixtures and the number
         * of columns equal to the space dimensionality.
         * return automatically generated
         */
        public Mat getMeans()
        {
            ThrowIfDisposed();
            return new Mat(DisposableObject.ThrowIfNullIntPtr(ml_EM_getMeans_10(nativeObj)));
        }

        //
        // C++: void cv::ml::EM::getCovs(vector_Mat& covs)
        //

        /**
         * Returns covariance matrices
         *
         * Returns a vector of covariance matrices. The number of matrices is the number of
         * Gaussian mixtures; each matrix is a square floating-point matrix NxN, where N is the
         * space dimensionality.
         * param covs automatically generated
         */
        public void getCovs(List<Mat> covs)
        {
            ThrowIfDisposed();
            Mat covs_mat = new Mat();
            ml_EM_getCovs_10(nativeObj, covs_mat.nativeObj);
            Converters.Mat_to_vector_Mat(covs_mat, covs);
            covs_mat.release();
        }
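
        // Usage sketch (illustrative only): reading back the fitted mixture
        // parameters. The `em` instance is assumed to be already trained.
        //
        //   Mat weights = em.getWeights();   // mixture weights, one per component
        //   Mat means = em.getMeans();       // nclusters x dims cluster centers
        //   List<Mat> covs = new List<Mat>();
        //   em.getCovs(covs);                // one dims x dims covariance per component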
        //
        // C++: float cv::ml::EM::predict(Mat samples, Mat& results = Mat(), int flags = 0)
        //

        /**
         * Returns posterior probabilities for the provided samples
         *
         * param samples The input samples, floating-point matrix
         * param results The optional output \( nSamples \times nClusters\) matrix of results. It contains
         * posterior probabilities for each sample from the input
         * param flags This parameter will be ignored
         * return automatically generated
         */
        public override float predict(Mat samples, Mat results, int flags)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (results != null) results.ThrowIfDisposed();
            return ml_EM_predict_10(nativeObj, samples.nativeObj, results.nativeObj, flags);
        }

        /**
         * Returns posterior probabilities for the provided samples
         *
         * param samples The input samples, floating-point matrix
         * param results The optional output \( nSamples \times nClusters\) matrix of results. It contains
         * posterior probabilities for each sample from the input
         * return automatically generated
         */
        public override float predict(Mat samples, Mat results)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (results != null) results.ThrowIfDisposed();
            return ml_EM_predict_11(nativeObj, samples.nativeObj, results.nativeObj);
        }

        /**
         * Returns posterior probabilities for the provided samples
         *
         * param samples The input samples, floating-point matrix
         * return automatically generated
         */
        public override float predict(Mat samples)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            return ml_EM_predict_12(nativeObj, samples.nativeObj);
        }

        //
        // C++: Vec2d cv::ml::EM::predict2(Mat sample, Mat& probs)
        //

        /**
         * Returns a likelihood logarithm value and an index of the most probable mixture component
         * for the given sample.
         *
         * param sample A sample for classification. It should be a one-channel matrix of
         * \(1 \times dims\) or \(dims \times 1\) size.
         * param probs Optional output matrix that contains posterior probabilities of each component
         * given the sample. It has \(1 \times nclusters\) size and CV_64FC1 type.
         *
         * The method returns a two-element double vector. The zero element is a likelihood logarithm
         * value for the sample. The first element is an index of the most probable mixture component
         * for the given sample.
         * return automatically generated
         */
        public double[] predict2(Mat sample, Mat probs)
        {
            ThrowIfDisposed();
            if (sample != null) sample.ThrowIfDisposed();
            if (probs != null) probs.ThrowIfDisposed();
            double[] retVal = new double[2];
            ml_EM_predict2_10(nativeObj, sample.nativeObj, probs.nativeObj, retVal);
            return retVal;
        }
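
        // Usage sketch (illustrative only): classifying a single sample with a
        // trained model. `sample` is assumed to be a 1 x dims CV_64FC1 Mat.
        //
        //   Mat probs = new Mat();
        //   double[] res = em.predict2(sample, probs);
        //   double logLikelihood = res[0];   // likelihood logarithm for the sample
        //   int component = (int)res[1];     // index of the most probable component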
        //
        // C++: bool cv::ml::EM::trainEM(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
        //

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. Initial values of the model parameters
         * will be estimated by the k-means algorithm.
         *
         * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
         * responses (class labels or function values) as input. Instead, it computes the *Maximum
         * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
         * parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
         * covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
         * sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
         * probable mixture component for each sample).
         *
         * The trained model can be used further for prediction, just like any other classifier. The
         * trained model is similar to the NormalBayesClassifier.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * param labels The optional output "class label" for each sample:
         * \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
         * mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
         * param probs The optional output matrix that contains posterior probabilities of each Gaussian
         * mixture component given each sample. It has \(nsamples \times nclusters\) size and
         * CV_64FC1 type.
         * return automatically generated
         */
        public bool trainEM(Mat samples, Mat logLikelihoods, Mat labels, Mat probs)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            if (labels != null) labels.ThrowIfDisposed();
            if (probs != null) probs.ThrowIfDisposed();
            return ml_EM_trainEM_10(nativeObj, samples.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj);
        }
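
        // Usage sketch (illustrative only): the full trainEM call. `samples` is
        // assumed to be an nsamples x dims CV_64FC1 Mat, one sample per row.
        //
        //   EM em = EM.create();
        //   em.setClustersNumber(3);
        //   Mat logLikelihoods = new Mat();
        //   Mat labels = new Mat();
        //   Mat probs = new Mat();
        //   bool trained = em.trainEM(samples, logLikelihoods, labels, probs);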
        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. Initial values of the model parameters
         * will be estimated by the k-means algorithm.
         *
         * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
         * responses (class labels or function values) as input. Instead, it computes the *Maximum
         * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
         * parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
         * covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
         * sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
         * probable mixture component for each sample).
         *
         * The trained model can be used further for prediction, just like any other classifier. The
         * trained model is similar to the NormalBayesClassifier.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * param labels The optional output "class label" for each sample:
         * \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
         * mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
         * return automatically generated
         */
        public bool trainEM(Mat samples, Mat logLikelihoods, Mat labels)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            if (labels != null) labels.ThrowIfDisposed();
            return ml_EM_trainEM_11(nativeObj, samples.nativeObj, logLikelihoods.nativeObj, labels.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. Initial values of the model parameters
         * will be estimated by the k-means algorithm.
         *
         * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
         * responses (class labels or function values) as input. Instead, it computes the *Maximum
         * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
         * parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
         * covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
         * sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
         * probable mixture component for each sample).
         *
         * The trained model can be used further for prediction, just like any other classifier. The
         * trained model is similar to the NormalBayesClassifier.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * return automatically generated
         */
        public bool trainEM(Mat samples, Mat logLikelihoods)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            return ml_EM_trainEM_12(nativeObj, samples.nativeObj, logLikelihoods.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. Initial values of the model parameters
         * will be estimated by the k-means algorithm.
         *
         * Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take
         * responses (class labels or function values) as input. Instead, it computes the *Maximum
         * Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
         * parameters inside the structure: \(p_{i,k}\) in probs, \(a_k\) in means, \(S_k\) in
         * covs[k], \(\pi_k\) in weights, and optionally computes the output "class label" for each
         * sample: \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most
         * probable mixture component for each sample).
         *
         * The trained model can be used further for prediction, just like any other classifier. The
         * trained model is similar to the NormalBayesClassifier.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * return automatically generated
         */
        public bool trainEM(Mat samples)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            return ml_EM_trainEM_13(nativeObj, samples.nativeObj);
        }
        //
        // C++: bool cv::ml::EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
        //

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
         * mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
         * \(S_k\) of mixture components.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
         * \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
         * converted to the inner matrix of such type for further computing.
         * param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
         * the covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
         * do not have CV_64F type they will be converted to the inner matrices of such type for
         * further computing.
         * param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
         * floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * param labels The optional output "class label" for each sample:
         * \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
         * mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
         * param probs The optional output matrix that contains posterior probabilities of each Gaussian
         * mixture component given each sample. It has \(nsamples \times nclusters\) size and
         * CV_64FC1 type.
         * return automatically generated
         */
        public bool trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods, Mat labels, Mat probs)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (means0 != null) means0.ThrowIfDisposed();
            if (covs0 != null) covs0.ThrowIfDisposed();
            if (weights0 != null) weights0.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            if (labels != null) labels.ThrowIfDisposed();
            if (probs != null) probs.ThrowIfDisposed();
            return ml_EM_trainE_10(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj);
        }
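
        // Usage sketch (illustrative only): seeding training with known initial
        // means via the two-argument overload. `samples` (nsamples x dims) and
        // `means0` (nclusters x dims), both CV_64F, are assumed to exist;
        // covariances and weights are then estimated internally.
        //
        //   bool ok = em.trainE(samples, means0);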
        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
         * mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
         * \(S_k\) of mixture components.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
         * \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
         * converted to the inner matrix of such type for further computing.
         * param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
         * the covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
         * do not have CV_64F type they will be converted to the inner matrices of such type for
         * further computing.
         * param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
         * floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * param labels The optional output "class label" for each sample:
         * \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
         * mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
         * return automatically generated
         */
        public bool trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods, Mat labels)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (means0 != null) means0.ThrowIfDisposed();
            if (covs0 != null) covs0.ThrowIfDisposed();
            if (weights0 != null) weights0.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            if (labels != null) labels.ThrowIfDisposed();
            return ml_EM_trainE_11(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
         * mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
         * \(S_k\) of mixture components.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
         * \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
         * converted to the inner matrix of such type for further computing.
         * param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
         * the covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
         * do not have CV_64F type they will be converted to the inner matrices of such type for
         * further computing.
         * param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
         * floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * return automatically generated
         */
        public bool trainE(Mat samples, Mat means0, Mat covs0, Mat weights0, Mat logLikelihoods)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (means0 != null) means0.ThrowIfDisposed();
            if (covs0 != null) covs0.ThrowIfDisposed();
            if (weights0 != null) weights0.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            return ml_EM_trainE_12(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj, logLikelihoods.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
         * mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
         * \(S_k\) of mixture components.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
         * \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
         * converted to the inner matrix of such type for further computing.
         * param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
         * the covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
         * do not have CV_64F type they will be converted to the inner matrices of such type for
         * further computing.
         * param weights0 Initial weights \(\pi_k\) of mixture components. It should be a one-channel
         * floating-point matrix with \(1 \times nclusters\) or \(nclusters \times 1\) size.
         * return automatically generated
         */
        public bool trainE(Mat samples, Mat means0, Mat covs0, Mat weights0)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (means0 != null) means0.ThrowIfDisposed();
            if (covs0 != null) covs0.ThrowIfDisposed();
            if (weights0 != null) weights0.ThrowIfDisposed();
            return ml_EM_trainE_13(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj, weights0.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
         * mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
         * \(S_k\) of mixture components.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
         * \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
         * converted to the inner matrix of such type for further computing.
         * param covs0 The vector of initial covariance matrices \(S_k\) of mixture components. Each of
         * the covariance matrices is a one-channel matrix of \(dims \times dims\) size. If the matrices
         * do not have CV_64F type they will be converted to the inner matrices of such type for
         * further computing.
         * return automatically generated
         */
        public bool trainE(Mat samples, Mat means0, Mat covs0)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (means0 != null) means0.ThrowIfDisposed();
            if (covs0 != null) covs0.ThrowIfDisposed();
            return ml_EM_trainE_14(nativeObj, samples.nativeObj, means0.nativeObj, covs0.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Expectation step. You need to provide initial means \(a_k\) of
         * mixture components. Optionally you can pass initial weights \(\pi_k\) and covariance matrices
         * \(S_k\) of mixture components.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param means0 Initial means \(a_k\) of mixture components. It is a one-channel matrix of
         * \(nclusters \times dims\) size. If the matrix does not have CV_64F type it will be
         * converted to the inner matrix of such type for further computing.
         * return automatically generated
         */
        public bool trainE(Mat samples, Mat means0)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (means0 != null) means0.ThrowIfDisposed();
            return ml_EM_trainE_15(nativeObj, samples.nativeObj, means0.nativeObj);
        }
        //
        // C++: bool cv::ml::EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
        //

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Maximization step. You need to provide initial probabilities
         * \(p_{i,k}\) to use this option.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param probs0 the probabilities
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * param labels The optional output "class label" for each sample:
         * \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
         * mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
         * param probs The optional output matrix that contains posterior probabilities of each Gaussian
         * mixture component given each sample. It has \(nsamples \times nclusters\) size and
         * CV_64FC1 type.
         * return automatically generated
         */
        public bool trainM(Mat samples, Mat probs0, Mat logLikelihoods, Mat labels, Mat probs)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (probs0 != null) probs0.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            if (labels != null) labels.ThrowIfDisposed();
            if (probs != null) probs.ThrowIfDisposed();
            return ml_EM_trainM_10(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj, probs.nativeObj);
        }
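
        // Usage sketch (illustrative only): starting from the Maximization step with
        // initial posteriors. `probs0` is assumed to be an nsamples x nclusters
        // CV_64FC1 Mat whose rows each sum to 1.
        //
        //   bool ok = em.trainM(samples, probs0);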
        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Maximization step. You need to provide initial probabilities
         * \(p_{i,k}\) to use this option.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param probs0 the probabilities
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * param labels The optional output "class label" for each sample:
         * \(\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\) (indices of the most probable
         * mixture component for each sample). It has \(nsamples \times 1\) size and CV_32SC1 type.
         * return automatically generated
         */
        public bool trainM(Mat samples, Mat probs0, Mat logLikelihoods, Mat labels)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (probs0 != null) probs0.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            if (labels != null) labels.ThrowIfDisposed();
            return ml_EM_trainM_11(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj, labels.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Maximization step. You need to provide initial probabilities
         * \(p_{i,k}\) to use this option.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param probs0 the probabilities
         * param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
         * each sample. It has \(nsamples \times 1\) size and CV_64FC1 type.
         * return automatically generated
         */
        public bool trainM(Mat samples, Mat probs0, Mat logLikelihoods)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (probs0 != null) probs0.ThrowIfDisposed();
            if (logLikelihoods != null) logLikelihoods.ThrowIfDisposed();
            return ml_EM_trainM_12(nativeObj, samples.nativeObj, probs0.nativeObj, logLikelihoods.nativeObj);
        }

        /**
         * Estimate the Gaussian mixture parameters from a sample set.
         *
         * This variation starts with the Maximization step. You need to provide initial probabilities
         * \(p_{i,k}\) to use this option.
         *
         * param samples Samples from which the Gaussian mixture model will be estimated. It should be a
         * one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
         * it will be converted to the inner matrix of such type for further computing.
         * param probs0 the probabilities
         * return automatically generated
         */
        public bool trainM(Mat samples, Mat probs0)
        {
            ThrowIfDisposed();
            if (samples != null) samples.ThrowIfDisposed();
            if (probs0 != null) probs0.ThrowIfDisposed();
            return ml_EM_trainM_13(nativeObj, samples.nativeObj, probs0.nativeObj);
        }
        //
        // C++: static Ptr_EM cv::ml::EM::create()
        //

        /**
         * Creates an empty EM model.
         * The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
         * can use one of the EM::train* methods or load it from a file using Algorithm::load&lt;EM&gt;(filename).
         * return automatically generated
         */
        public static EM create()
        {
            return EM.__fromPtr__(DisposableObject.ThrowIfNullIntPtr(ml_EM_create_10()));
        }
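
        // Usage sketch (illustrative only): creating and configuring a model before
        // training. The parameter values are examples, not recommended defaults.
        //
        //   EM em = EM.create();
        //   em.setClustersNumber(EM.DEFAULT_NCLUSTERS);
        //   em.setCovarianceMatrixType(EM.COV_MAT_GENERIC);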
        //
        // C++: static Ptr_EM cv::ml::EM::load(String filepath, String nodeName = String())
        //

        /**
         * Loads and creates a serialized EM from a file.
         *
         * Use EM::save to serialize and store an EM to disk.
         * Load the EM from this file again by calling this function with the path to the file.
         * Optionally specify the node for the file containing the classifier.
         *
         * param filepath path to serialized EM
         * param nodeName name of node containing the classifier
         * return automatically generated
         */
        public static EM load(string filepath, string nodeName)
        {
            return EM.__fromPtr__(DisposableObject.ThrowIfNullIntPtr(ml_EM_load_10(filepath, nodeName)));
        }

        /**
         * Loads and creates a serialized EM from a file.
         *
         * Use EM::save to serialize and store an EM to disk.
         * Load the EM from this file again by calling this function with the path to the file.
         * Optionally specify the node for the file containing the classifier.
         *
         * param filepath path to serialized EM
         * return automatically generated
         */
        public static EM load(string filepath)
        {
            return EM.__fromPtr__(DisposableObject.ThrowIfNullIntPtr(ml_EM_load_11(filepath)));
        }
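
        // Usage sketch (illustrative only): save/load round trip. `save` is assumed
        // to be inherited from the Algorithm base class; "em.yml" is an example path.
        //
        //   em.save("em.yml");
        //   EM restored = EM.load("em.yml");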
#if (UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR
        const string LIBNAME = "__Internal";
#else
        const string LIBNAME = "opencvforunity";
#endif

        // C++: int cv::ml::EM::getClustersNumber()
        [DllImport(LIBNAME)]
        private static extern int ml_EM_getClustersNumber_10(IntPtr nativeObj);

        // C++: void cv::ml::EM::setClustersNumber(int val)
        [DllImport(LIBNAME)]
        private static extern void ml_EM_setClustersNumber_10(IntPtr nativeObj, int val);

        // C++: int cv::ml::EM::getCovarianceMatrixType()
        [DllImport(LIBNAME)]
        private static extern int ml_EM_getCovarianceMatrixType_10(IntPtr nativeObj);

        // C++: void cv::ml::EM::setCovarianceMatrixType(int val)
        [DllImport(LIBNAME)]
        private static extern void ml_EM_setCovarianceMatrixType_10(IntPtr nativeObj, int val);

        // C++: TermCriteria cv::ml::EM::getTermCriteria()
        [DllImport(LIBNAME)]
        private static extern void ml_EM_getTermCriteria_10(IntPtr nativeObj, double[] retVal);

        // C++: void cv::ml::EM::setTermCriteria(TermCriteria val)
        [DllImport(LIBNAME)]
        private static extern void ml_EM_setTermCriteria_10(IntPtr nativeObj, int val_type, int val_maxCount, double val_epsilon);

        // C++: Mat cv::ml::EM::getWeights()
        [DllImport(LIBNAME)]
        private static extern IntPtr ml_EM_getWeights_10(IntPtr nativeObj);

        // C++: Mat cv::ml::EM::getMeans()
        [DllImport(LIBNAME)]
        private static extern IntPtr ml_EM_getMeans_10(IntPtr nativeObj);

        // C++: void cv::ml::EM::getCovs(vector_Mat& covs)
        [DllImport(LIBNAME)]
        private static extern void ml_EM_getCovs_10(IntPtr nativeObj, IntPtr covs_mat_nativeObj);

        // C++: float cv::ml::EM::predict(Mat samples, Mat& results = Mat(), int flags = 0)
        [DllImport(LIBNAME)]
        private static extern float ml_EM_predict_10(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr results_nativeObj, int flags);
        [DllImport(LIBNAME)]
        private static extern float ml_EM_predict_11(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr results_nativeObj);
        [DllImport(LIBNAME)]
        private static extern float ml_EM_predict_12(IntPtr nativeObj, IntPtr samples_nativeObj);

        // C++: Vec2d cv::ml::EM::predict2(Mat sample, Mat& probs)
        [DllImport(LIBNAME)]
        private static extern void ml_EM_predict2_10(IntPtr nativeObj, IntPtr sample_nativeObj, IntPtr probs_nativeObj, double[] retVal);

        // C++: bool cv::ml::EM::trainEM(Mat samples, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainEM_10(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr logLikelihoods_nativeObj, IntPtr labels_nativeObj, IntPtr probs_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainEM_11(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr logLikelihoods_nativeObj, IntPtr labels_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainEM_12(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr logLikelihoods_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainEM_13(IntPtr nativeObj, IntPtr samples_nativeObj);

        // C++: bool cv::ml::EM::trainE(Mat samples, Mat means0, Mat covs0 = Mat(), Mat weights0 = Mat(), Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainE_10(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr means0_nativeObj, IntPtr covs0_nativeObj, IntPtr weights0_nativeObj, IntPtr logLikelihoods_nativeObj, IntPtr labels_nativeObj, IntPtr probs_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainE_11(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr means0_nativeObj, IntPtr covs0_nativeObj, IntPtr weights0_nativeObj, IntPtr logLikelihoods_nativeObj, IntPtr labels_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainE_12(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr means0_nativeObj, IntPtr covs0_nativeObj, IntPtr weights0_nativeObj, IntPtr logLikelihoods_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainE_13(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr means0_nativeObj, IntPtr covs0_nativeObj, IntPtr weights0_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainE_14(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr means0_nativeObj, IntPtr covs0_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainE_15(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr means0_nativeObj);

        // C++: bool cv::ml::EM::trainM(Mat samples, Mat probs0, Mat& logLikelihoods = Mat(), Mat& labels = Mat(), Mat& probs = Mat())
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainM_10(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr probs0_nativeObj, IntPtr logLikelihoods_nativeObj, IntPtr labels_nativeObj, IntPtr probs_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainM_11(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr probs0_nativeObj, IntPtr logLikelihoods_nativeObj, IntPtr labels_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainM_12(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr probs0_nativeObj, IntPtr logLikelihoods_nativeObj);
        [DllImport(LIBNAME)]
        [return: MarshalAs(UnmanagedType.U1)]
        private static extern bool ml_EM_trainM_13(IntPtr nativeObj, IntPtr samples_nativeObj, IntPtr probs0_nativeObj);

        // C++: static Ptr_EM cv::ml::EM::create()
        [DllImport(LIBNAME)]
        private static extern IntPtr ml_EM_create_10();

        // C++: static Ptr_EM cv::ml::EM::load(String filepath, String nodeName = String())
        [DllImport(LIBNAME)]
        private static extern IntPtr ml_EM_load_10(string filepath, string nodeName);
        [DllImport(LIBNAME)]
        private static extern IntPtr ml_EM_load_11(string filepath);

        // native support for java finalize()
        [DllImport(LIBNAME)]
        private static extern void ml_EM_delete(IntPtr nativeObj);

    }
}