Direct Graphical Models  v.1.7.0
TrainNode.cpp
#include "TrainNode.h"

#include <vector>

#include "TrainNodeNaiveBayes.h"
#include "TrainNodeGM.h"
#include "TrainNodeGMM.h"
#include "TrainNodeCvGM.h"
#include "TrainNodeCvGMM.h"
#include "TrainNodeKNN.h"
#include "TrainNodeCvKNN.h"
#include "TrainNodeCvRF.h"
#include "TrainNodeMsRF.h"
#include "TrainNodeCvANN.h"
#include "TrainNodeCvSVM.h"

#include "macroses.h"
16 
17 namespace DirectGraphicalModels
18 {
19  // Factory method
20  std::shared_ptr<CTrainNode> CTrainNode::create(byte nodeRandomModel, byte nStates, word nFeatures)
21  {
22  switch (nodeRandomModel)
23  {
24  case NodeRandomModel::Bayes: return std::make_shared<CTrainNodeBayes>(nStates, nFeatures);
25  case NodeRandomModel::GM: return std::make_shared<CTrainNodeGM>(nStates, nFeatures);
26  case NodeRandomModel::GMM: return std::make_shared<CTrainNodeGMM>(nStates, nFeatures);
27  case NodeRandomModel::CvGM: return std::make_shared<CTrainNodeCvGM>(nStates, nFeatures);
28  case NodeRandomModel::CvGMM: return std::make_shared<CTrainNodeCvGMM>(nStates, nFeatures);
29  case NodeRandomModel::KNN: return std::make_shared<CTrainNodeKNN>(nStates, nFeatures);
30  case NodeRandomModel::CvKNN: return std::make_shared<CTrainNodeCvKNN>(nStates, nFeatures);
31  case NodeRandomModel::CvRF: return std::make_shared<CTrainNodeCvRF>(nStates, nFeatures);
32 #ifdef USE_SHERWOOD
33  case NodeRandomModel::MsRF: return std::make_shared<CTrainNodeMsRF>(nStates, nFeatures);
34 #endif
35  case NodeRandomModel::CvANN: return std::make_shared<CTrainNodeCvANN>(nStates, nFeatures);
36  case NodeRandomModel::CvSVM: return std::make_shared<CTrainNodeCvSVM>(nStates, nFeatures);
37  default:
38  DGM_ASSERT_MSG(false, "Unknown type of the node random model");
39  }
40  }
41 
42  void CTrainNode::addFeatureVecs(const Mat &featureVectors, const Mat &gt)
43  {
44  DGM_ASSERT_MSG(featureVectors.channels() == getNumFeatures(), "Number of features in the <featureVectors> (%d) does not correspond to the specified (%d)", featureVectors.channels(), getNumFeatures());
45  DGM_VECTORWISE1<CTrainNode, &CTrainNode::addFeatureVec>(*this, featureVectors, gt);
46  }
47 
48  void CTrainNode::addFeatureVecs(const vec_mat_t &featureVectors, const Mat &gt)
49  {
50  DGM_ASSERT_MSG(featureVectors.size() == getNumFeatures(), "Number of features in the <featureVectors> (%zu) does not correspond to the specified (%d)", featureVectors.size(), getNumFeatures());
51  DGM_VECTORWISE1<CTrainNode, &CTrainNode::addFeatureVec>(*this, featureVectors, gt);
52  }
53 
54  Mat CTrainNode::getNodePotentials(const Mat &featureVectors, const Mat &weights, float Z) const
55  {
56  // Assertions
57  DGM_ASSERT_MSG(featureVectors.channels() == getNumFeatures(), "Number of features in the <featureVectors> (%d) does not correspond to the specified (%d)", featureVectors.channels(), getNumFeatures());
58  DGM_ASSERT(featureVectors.depth() == CV_8U);
59  if (!weights.empty()) {
60  DGM_ASSERT(featureVectors.size() == weights.size());
61  DGM_ASSERT(weights.type() == CV_32FC1);
62  }
63 
64  Mat res(featureVectors.size(), CV_32FC(m_nStates));
65 #ifdef ENABLE_PPL
66  concurrency::parallel_for(0, res.rows, [&] (int y) {
67  Mat pot;
68  Mat vec(getNumFeatures(), 1, CV_8UC1);
69 #else
70  Mat pot;
71  Mat vec(getNumFeatures(), 1, CV_8UC1);
72  for (int y = 0; y < res.rows; y++) {
73 #endif
74  const byte *pFv = featureVectors.ptr<byte>(y);
75  const float *pW = weights.empty() ? NULL : weights.ptr<float>(y);
76  float *pRes = res.ptr<float>(y);
77  for (int x = 0; x < res.cols; x++) {
78  float weight = pW ? pW[x] : 1.0f;
79  for (int f = 0; f < getNumFeatures(); f++) vec.at<byte>(f, 0) = pFv[getNumFeatures() * x + f];
80  pot = getNodePotentials(vec, weight, Z);
81  for (int s = 0; s < m_nStates; s++) pRes[m_nStates * x + s] = pot.at<float>(s, 0);
82  } // x
83  } // y
84 #ifdef ENABLE_PPL
85  );
86 #endif
87 
88  return res;
89  }
90 
91  Mat CTrainNode::getNodePotentials(const vec_mat_t &featureVectors, const Mat &weights, float Z) const
92  {
93  DGM_ASSERT_MSG(featureVectors.size() == getNumFeatures(), "Number of features in the <featureVectors> (%zu) does not correspond to the specified (%d)", featureVectors.size(), getNumFeatures());
94  DGM_ASSERT(featureVectors[0].depth() == CV_8U);
95  if (!weights.empty()) {
96  DGM_ASSERT(featureVectors[0].size() == weights.size());
97  DGM_ASSERT(weights.type() == CV_32FC1);
98  }
99 
100  Mat res(featureVectors[0].size(), CV_32FC(m_nStates));
101 #ifdef ENABLE_PPL
102  concurrency::parallel_for(0, res.rows, [&](int y) {
103  Mat pot;
104  Mat vec(getNumFeatures(), 1, CV_8UC1);
105 #else
106  Mat pot;
107  Mat vec(getNumFeatures(), 1, CV_8UC1);
108  for (int y = 0; y < res.rows; y++) {
109 #endif
110  const byte **pFv = new const byte *[getNumFeatures()];
111  for (word f = 0; f < getNumFeatures(); f++) pFv[f] = featureVectors[f].ptr<byte>(y);
112  const float *pW = weights.empty() ? NULL : weights.ptr<float>(y);
113  float *pRes = res.ptr<float>(y);
114  for (int x = 0; x < res.cols; x++) {
115  float weight = pW ? pW[x] : 1.0f;
116  for (int f = 0; f < getNumFeatures(); f++) vec.at<byte>(f, 0) = pFv[f][x];
117  pot = getNodePotentials(vec, weight, Z);
118  for (int s = 0; s < m_nStates; s++) pRes[m_nStates * x + s] = pot.at<float>(s, 0);
119  } // x
120  delete[] pFv;
121  } // y
122 #ifdef ENABLE_PPL
123  );
124 #endif
125 
126  return res;
127  }
128 
    /// Computes the potential vector (m_nStates x 1, CV_32FC1) for a single feature vector,
    /// delegating the model-specific work to the pure-virtual calculateNodePotentials().
    /// @param featureVector Column vector of size (1 x nFeatures), type CV_8UC1.
    /// @param weight        Exponent applied element-wise to the raw potentials (skipped when 1.0).
    /// @param Z             If > FLT_EPSILON, used as the normalization constant instead of the potentials' sum.
    Mat CTrainNode::getNodePotentials(const Mat &featureVector, float weight, float Z) const
    {
        // Assertions
        DGM_ASSERT_MSG(featureVector.type() == CV_8UC1,
            "The input feature vector has either wrong depth or more than one channel");
        DGM_ASSERT_MSG((featureVector.size().width == 1) && (featureVector.size().height == getNumFeatures()),
            "The input feature vector has wrong size:(%d, %d)", featureVector.size().width, featureVector.size().height);

        Mat res(m_nStates, 1, CV_32FC1, Scalar(0));
        // NOTE(review): m_mask is a member mutated through const_cast inside this const
        // method; concurrent calls on the same instance (e.g. from the ENABLE_PPL block
        // callers) would race on it — confirm intended thread-safety.
        const_cast<Mat &>(m_mask).setTo(1);
        // The implementation may zero entries of m_mask to exclude states from normalization
        calculateNodePotentials(featureVector, res, const_cast<Mat &>(m_mask));
        if (weight != 1.0f) pow(res, weight, res);

        // Normalization: scale so that the (masked) potentials sum to 100
        float Sum = static_cast<float>(sum(res).val[0]);
        if (Sum < FLT_EPSILON) {
            res.setTo(FLT_EPSILON, m_mask); // Case of too small potentials (make all the cases equally small probable)
        } else {
            if (Z > FLT_EPSILON)
                res *= 100.0 / Z;   // external normalization constant supplied by the caller
            else
                res *= 100.0 / Sum; // self-normalization against the potentials' own sum
        }

        return res;
    }
155 }
void addFeatureVecs(const Mat &featureVectors, const Mat &gt)
Adds a block of new feature vectors.
Definition: TrainNode.cpp:42
virtual void calculateNodePotentials(const Mat &featureVector, Mat &potential, Mat &mask) const =0
Calculates the node potential, based on the feature vector.
word getNumFeatures(void) const
Returns number of features.
Definition: ITrain.h:37
Mat getNodePotentials(const Mat &featureVectors, const Mat &weights=Mat(), float Z=0.0f) const
Returns a block of node potentials, based on the block of feature vectors.
Definition: TrainNode.cpp:54
static std::shared_ptr< CTrainNode > create(byte nodeRandomModel, byte nStates, word nFeatures)
Factory method returning node trainer object.
Definition: TrainNode.cpp:20
byte m_nStates
The number of states (classes)