Direct Graphical Models  v.1.7.0
GraphLayeredExt.cpp
1 #include "GraphLayeredExt.h"
2 #include "GraphPairwise.h"
3 
4 #include "TrainNode.h"
5 #include "TrainEdge.h"
6 #include "TrainEdgePotts.h"
7 #include "TrainLink.h"
8 #include "TrainEdgePottsCS.h"
9 #include "macroses.h"
10 
11 namespace DirectGraphicalModels
12 {
	// Builds a 2D multi-layered graph: one node per (pixel, layer), so the node
	// index of pixel (x, y) at layer l is (y * width + x) * m_nLayers + l.
	// Edge topology is controlled by m_gType: LINK (inter-layer), GRID
	// (horizontal / vertical), DIAG (diagonal).
	void CGraphLayeredExt::buildGraph(Size graphSize)
	{
		if (m_graph.getNumNodes() != 0) m_graph.reset();	// start from an empty graph
		m_size = graphSize;

		word l;
		for (int y = 0; y < m_size.height; y++)
			for (int x = 0; x < m_size.width; x++) {
				// Nodes
				// addNode() returns the index of this pixel's base-layer node;
				// the remaining m_nLayers - 1 nodes follow consecutively
				size_t idx = m_graph.addNode();
				for (l = 1; l < m_nLayers; l++) m_graph.addNode();

				// All links have group_id = 1
				// Inter-layer connections: an undirected arc between layers 0 and 1,
				// then directed edges between each further pair of consecutive layers
				if (m_gType & GRAPH_EDGES_LINK) {
					if (m_nLayers >= 2)
						m_graph.addArc(idx, idx + 1, 1, Mat());
					for (l = 2; l < m_nLayers; l++)
						m_graph.addEdge(idx + l - 1, idx + l, 1, Mat());
				} // if LINK

				// Grid connections to the already-created left and upper neighbors
				if (m_gType & GRAPH_EDGES_GRID) {
					if (x > 0)
						for (l = 0; l < m_nLayers; l++)
							m_graph.addArc(idx + l, idx + l - m_nLayers);						// left neighbor
					if (y > 0)
						for (l = 0; l < m_nLayers; l++)
							m_graph.addArc(idx + l, idx + l - m_nLayers * m_size.width);		// upper neighbor
				} // if GRID
			} // x


		// Diagonal connections: second pass, once all nodes exist
		if (m_gType & GRAPH_EDGES_DIAG) {
			for (int y = 0; y < m_size.height; y++) {
				for (int x = 0; x < m_size.width; x++) {
					size_t idx = (y * m_size.width + x) * m_nLayers;

					if ((x > 0) && (y > 0))
						for (l = 0; l < m_nLayers; l++)
							m_graph.addArc(idx + l, idx + l - m_nLayers * (m_size.width + 1));	// upper-left neighbor

					if ((x < graphSize.width - 1) && (y > 0))
						for (l = 0; l < m_nLayers; l++)
							m_graph.addArc(idx + l, idx + l - m_nLayers * (m_size.width - 1));	// upper-right neighbor
				} // x
			} // y
		} // if DIAG
	}
60 
61  void CGraphLayeredExt::setGraph(const Mat& pots)
62  {
63  DGM_ASSERT_MSG(m_nLayers == 1, "When more than 1 layer is present, use CGraphLayeredExt::setGraph(const Mat&, const Mat&) function instead.");
64  setGraph(pots, Mat());
65  }
66 
67  void CGraphLayeredExt::addDefaultEdgesModel(float val, float weight)
68  {
69  if (weight != 1.0f) val = powf(val, weight);
70  const byte nStates = m_graph.getNumStates();
71  m_graph.setEdges(0, CTrainEdge::getDefaultEdgePotentials(sqrtf(val), nStates));
73  }
74 
75  void CGraphLayeredExt::addDefaultEdgesModel(const Mat &featureVectors, float val, float weight)
76  {
77  const byte nStates = m_graph.getNumStates();
78  const word nFeatures = featureVectors.channels();
79  const CTrainEdgePottsCS edgeTrainer(nStates, nFeatures);
80  fillEdges(edgeTrainer, NULL, featureVectors, { val, 0.001f }, weight);
82  }
83 
84  void CGraphLayeredExt::addDefaultEdgesModel(const vec_mat_t &featureVectors, float val, float weight)
85  {
86  const byte nStates = m_graph.getNumStates();
87  const word nFeatures = static_cast<word>(featureVectors.size());
88  const CTrainEdgePottsCS edgeTrainer(nStates, nFeatures);
89  fillEdges(edgeTrainer, NULL, featureVectors, { val, 0.001f }, weight);
91  }
92 
	// Fills node potentials: potBase (one channel per base state) feeds the
	// base layer, potOccl feeds the occlusion layer (layer 1), and any further
	// layers receive a fixed uniform potential over the occlusion states.
	void CGraphLayeredExt::setGraph(const Mat &potBase, const Mat &potOccl)
	{
		// Assertions
		DGM_ASSERT(!potBase.empty());
		DGM_ASSERT(CV_32F == potBase.depth());
		if (!potOccl.empty()) {
			DGM_ASSERT(potBase.size() == potOccl.size());
			DGM_ASSERT(CV_32F == potOccl.depth());
		}
		if (m_size != potBase.size()) buildGraph(potBase.size());	// (re-)build the graph on size mismatch
		DGM_ASSERT(m_size.height == potBase.rows);
		DGM_ASSERT(m_size.width == potBase.cols);
		DGM_ASSERT(m_size.width * m_size.height * m_nLayers == m_graph.getNumNodes());

		// One state per channel of each potential matrix
		byte nStatesBase = static_cast<byte>(potBase.channels());
		byte nStatesOccl = potOccl.empty() ? 0 : static_cast<byte>(potOccl.channels());
		if (m_nLayers >= 2) DGM_ASSERT(nStatesOccl);	// multi-layer graphs require occlusion potentials
		DGM_ASSERT(nStatesBase + nStatesOccl == m_graph.getNumStates());

		// Row loop runs either in parallel (PPL) or sequentially. The scratch
		// column vectors are per-task in the parallel branch and reused across
		// rows otherwise. nPotIntr is a constant uniform distribution over the
		// occlusion states (the last nStatesOccl entries), used for layers >= 2.
#ifdef ENABLE_PPL
		concurrency::parallel_for(0, m_size.height, [&, nStatesBase, nStatesOccl](int y) {
		Mat nPotBase(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
		Mat nPotOccl(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
		Mat nPotIntr(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
		for (byte s = 0; s < nStatesOccl; s++)
			nPotIntr.at<float>(m_graph.getNumStates() - nStatesOccl + s, 0) = 100.0f / nStatesOccl;
#else
		Mat nPotBase(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
		Mat nPotOccl(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
		Mat nPotIntr(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
		for (byte s = 0; s < nStatesOccl; s++)
			nPotIntr.at<float>(m_graph.getNumStates() - nStatesOccl + s, 0) = 100.0f / nStatesOccl;
		for (int y = 0; y < m_size.height; y++) {
#endif
		const float *pPotBase = potBase.ptr<float>(y);
		const float *pPotOccl = potOccl.empty() ? NULL : potOccl.ptr<float>(y);
		for (int x = 0; x < m_size.width; x++) {
			size_t idx = (y * m_size.width + x) * m_nLayers;

			// Base layer (layer 0): base-state entries filled, the rest stay 0
			for (byte s = 0; s < nStatesBase; s++)
				nPotBase.at<float>(s, 0) = pPotBase[nStatesBase * x + s];
			m_graph.setNode(idx, nPotBase);

			// Occlusion layer (layer 1): occlusion-state entries filled
			if (m_nLayers >= 2) {
				for (byte s = 0; s < nStatesOccl; s++)
					nPotOccl.at<float>(m_graph.getNumStates() - nStatesOccl + s, 0) = pPotOccl[nStatesOccl * x + s];
				m_graph.setNode(idx + 1, nPotOccl);
			}

			// Remaining layers: uniform intrinsic potential
			for (word l = 2; l < m_nLayers; l++)
				m_graph.setNode(idx + l, nPotIntr);
		} // x
	} // y
#ifdef ENABLE_PPL
	);
#endif
	}
150 
151  void CGraphLayeredExt::addFeatureVecs(CTrainEdge &edgeTrainer, const Mat &featureVectors, const Mat &gt)
152  {
153  // Assertions
154  DGM_ASSERT_MSG(featureVectors.size() == gt.size(), "The size of <featureVectors> does not correspond to the size of <gt>");
155  DGM_ASSERT_MSG(featureVectors.depth() == CV_8U, "The argument <featureVectors> has wrong depth");
156  DGM_ASSERT_MSG(gt.type() == CV_8UC1, "The argument <gt> has either wrong depth or more than one channel");
157  DGM_ASSERT_MSG(featureVectors.channels() == edgeTrainer.getNumFeatures(),
158  "Number of features in the <featureVectors> (%d) does not correspond to the specified (%d)", featureVectors.channels(), edgeTrainer.getNumFeatures());
159 
160  const word nFeatures = featureVectors.channels();
161 
162  Mat featureVector1(nFeatures, 1, CV_8UC1);
163  Mat featureVector2(nFeatures, 1, CV_8UC1);
164 
165  for (int y = 0; y < gt.rows; y++) {
166  const byte *pFV1 = featureVectors.ptr<byte>(y);
167  const byte *pFV2 = y > 0 ? featureVectors.ptr<byte>(y - 1) : NULL;
168  const byte *pGt1 = gt.ptr<byte>(y);
169  const byte *pGt2 = y > 0 ? gt.ptr<byte>(y - 1) : NULL;
170  for (int x = 0; x < gt.cols; x++) {
171  for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = pFV1[nFeatures * x + f]; // featureVector[x][y]
172  if (m_gType & GRAPH_EDGES_GRID) {
173  if (x > 0) {
174  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV1[nFeatures * (x - 1) + f]; // featureVector[x-1][y]
175  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt1[x - 1]);
176  edgeTrainer.addFeatureVecs(featureVector2, pGt1[x - 1], featureVector1, pGt1[x]);
177  }
178  if (y > 0) {
179  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV2[nFeatures * x + f]; // featureVector[x][y-1]
180  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x]);
181  edgeTrainer.addFeatureVecs(featureVector2, pGt2[x], featureVector1, pGt1[x]);
182  }
183  }
184  if (m_gType & GRAPH_EDGES_DIAG) {
185  if ((x > 0) && (y > 0)) {
186  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV2[nFeatures * (x - 1) + f]; // featureVector[x-1][y-1]
187  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x - 1]);
188  edgeTrainer.addFeatureVecs(featureVector2, pGt2[x - 1], featureVector1, pGt1[x]);
189  }
190  if ((x < gt.cols - 1) && (y > 0)) {
191  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV2[nFeatures * (x + 1) + f]; // featureVector[x+1][y-1]
192  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x + 1]);
193  edgeTrainer.addFeatureVecs(featureVector2, pGt2[x + 1], featureVector1, pGt1[x]);
194  }
195  }
196  } // x
197  } // y
198  }
199 
200  void CGraphLayeredExt::addFeatureVecs(CTrainEdge &edgeTrainer, const vec_mat_t &featureVectors, const Mat &gt)
201  {
202  // Assertions
203  DGM_ASSERT_MSG(featureVectors[0].size() == gt.size(), "The size of <featureVectors> does not correspond to the size of <gt>");
204  DGM_ASSERT_MSG(featureVectors[0].type() == CV_8UC1, "The argument <featureVectors> has either wrong depth or more than one channel");
205  DGM_ASSERT_MSG(gt.type() == CV_8UC1, "The argument <gt> has either wrong depth or more than one channel");
206  DGM_ASSERT_MSG(featureVectors.size() == edgeTrainer.getNumFeatures(),
207  "Number of features in the <featureVectors> (%zu) does not correspond to the specified (%d)", featureVectors.size(), edgeTrainer.getNumFeatures());
208 
209  const word nFeatures = static_cast<word>(featureVectors.size());
210 
211  Mat featureVector1(nFeatures, 1, CV_8UC1);
212  Mat featureVector2(nFeatures, 1, CV_8UC1);
213 
214  std::vector<const byte *> vFV1(nFeatures);
215  std::vector<const byte *> vFV2(nFeatures);
216  for (int y = 0; y < gt.rows; y++) {
217  for (word f = 0; f < nFeatures; f++) {
218  vFV1[f] = featureVectors[f].ptr<byte>(y);
219  if (y > 0) vFV2[f] = featureVectors[f].ptr<byte>(y - 1);
220  }
221  const byte *pGt1 = gt.ptr<byte>(y);
222  const byte *pGt2 = y > 0 ? gt.ptr<byte>(y - 1) : NULL;
223  for (int x = 0; x < gt.cols; x++) {
224  for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = vFV1[f][x]; // featureVector[x][y]
225  if (m_gType & GRAPH_EDGES_GRID) {
226  if (x > 0) {
227  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV1[f][x - 1]; // featureVector[x-1][y]
228  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt1[x - 1]);
229  edgeTrainer.addFeatureVecs(featureVector2, pGt1[x - 1], featureVector1, pGt1[x]);
230  }
231  if (y > 0) {
232  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV2[f][x]; // featureVector[x][y-1]
233  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x]);
234  edgeTrainer.addFeatureVecs(featureVector2, pGt2[x], featureVector1, pGt1[x]);
235  }
236  }
237  if (m_gType & GRAPH_EDGES_DIAG) {
238  if ((x > 0) && (y > 0)) {
239  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV2[f][x - 1]; // featureVector[x-1][y-1]
240  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x - 1]);
241  edgeTrainer.addFeatureVecs(featureVector2, pGt2[x - 1], featureVector1, pGt1[x]);
242  }
243  if ((x < gt.cols - 1) && (y > 0)) {
244  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV2[f][x + 1]; // featureVector[x+1][y-1]
245  edgeTrainer.addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x + 1]);
246  edgeTrainer.addFeatureVecs(featureVector2, pGt2[x + 1], featureVector1, pGt1[x]);
247  }
248  }
249  } // x
250  } // y
251  }
252 
	// Fills the graph edges with potentials: edgeTrainer supplies GRID / DIAG
	// potentials from the per-pixel features (packed as channels of
	// featureVectors); linkTrainer supplies the inter-layer LINK potentials.
	// NOTE(review): linkTrainer is dereferenced whenever m_gType contains
	// GRAPH_EDGES_LINK — callers passing NULL (e.g. addDefaultEdgesModel) must
	// not use link edges; confirm this invariant with the class documentation.
	void CGraphLayeredExt::fillEdges(const CTrainEdge& edgeTrainer, const CTrainLink* linkTrainer, const Mat& featureVectors, const vec_float_t& vParams, float edgeWeight, float linkWeight)
	{
		const word nFeatures = featureVectors.channels();

		// Assertions
		DGM_ASSERT(m_size.height == featureVectors.rows);
		DGM_ASSERT(m_size.width == featureVectors.cols);
		DGM_ASSERT(nFeatures == edgeTrainer.getNumFeatures());
		if (linkTrainer) DGM_ASSERT(nFeatures == linkTrainer->getNumFeatures());
		DGM_ASSERT(m_size.width * m_size.height * m_nLayers == m_graph.getNumNodes());

		// Row loop runs either in parallel (PPL) or sequentially; the scratch
		// Mats are per-task in the parallel branch, reused across rows otherwise
#ifdef ENABLE_PPL
		concurrency::parallel_for(0, m_size.height, [&, nFeatures](int y) {
		Mat featureVector1(nFeatures, 1, CV_8UC1);
		Mat featureVector2(nFeatures, 1, CV_8UC1);
		Mat ePot;
		word l;
#else
		Mat featureVector1(nFeatures, 1, CV_8UC1);
		Mat featureVector2(nFeatures, 1, CV_8UC1);
		Mat ePot;
		word l;
		for (int y = 0; y < m_size.height; y++) {
#endif
		const byte *pFv1 = featureVectors.ptr<byte>(y);
		const byte *pFv2 = (y > 0) ? featureVectors.ptr<byte>(y - 1) : NULL;
		for (int x = 0; x < m_size.width; x++) {
			size_t idx = (y * m_size.width + x) * m_nLayers;
			for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = pFv1[nFeatures * x + f]; // featureVectors[x][y]

			if (m_gType & GRAPH_EDGES_LINK) {
				// Link potential between layers 0 and 1, symmetrized with its transpose
				ePot = linkTrainer->getLinkPotentials(featureVector1, linkWeight);
				add(ePot, ePot.t(), ePot);
				if (m_nLayers >= 2)
					m_graph.setArc(idx, idx + 1, ePot);
				// Remaining consecutive layers get a default data-independent potential
				ePot = CTrainEdge::getDefaultEdgePotentials(100, m_graph.getNumStates());
				for (l = 2; l < m_nLayers; l++)
					m_graph.setEdge(idx + l - 1, idx + l, ePot);
			} // edges_link

			if (m_gType & GRAPH_EDGES_GRID) {
				if (x > 0) {	// arc to the left neighbor, all layers
					for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv1[nFeatures * (x - 1) + f]; // featureVectors[x-1][y]
					ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
					for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers, ePot);
				} // if x

				if (y > 0) {	// arc to the upper neighbor, all layers
					for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[nFeatures * x + f]; // featureVectors[x][y-1]
					ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
					for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width, ePot);
				} // if y
			} // edges_grid

			if (m_gType & GRAPH_EDGES_DIAG) {
				if ((x > 0) && (y > 0)) {	// arc to the upper-left neighbor, all layers
					for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[nFeatures * (x - 1) + f]; // featureVectors[x-1][y-1]
					ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
					for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width - m_nLayers, ePot);
				} // if x, y

				if ((x < m_size.width - 1) && (y > 0)) {	// arc to the upper-right neighbor, all layers
					for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[nFeatures * (x + 1) + f]; // featureVectors[x+1][y-1]
					ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
					for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width + m_nLayers, ePot);
				} // x, y
			} // edges_diag
		} // x
#ifdef ENABLE_PPL
		}); // y
#else
		} // y
#endif
	}
327 
328  void CGraphLayeredExt::fillEdges(const CTrainEdge& edgeTrainer, const CTrainLink* linkTrainer, const vec_mat_t& featureVectors, const vec_float_t& vParams, float edgeWeight, float linkWeight)
329  {
330  const word nFeatures =static_cast<word>(featureVectors.size());
331 
332  // Assertions
333  DGM_ASSERT(m_size.height == featureVectors[0].rows);
334  DGM_ASSERT(m_size.width == featureVectors[0].cols);
335  DGM_ASSERT(nFeatures == edgeTrainer.getNumFeatures());
336  if (linkTrainer) DGM_ASSERT(nFeatures == linkTrainer->getNumFeatures());
337  DGM_ASSERT(m_size.width * m_size.height * m_nLayers == m_graph.getNumNodes());
338 
339 #ifdef ENABLE_PPL
340  concurrency::parallel_for(0, m_size.height, [&, nFeatures](int y) {
341  Mat featureVector1(nFeatures, 1, CV_8UC1);
342  Mat featureVector2(nFeatures, 1, CV_8UC1);
343  Mat ePot;
344  word l;
345 #else
346  Mat featureVector1(nFeatures, 1, CV_8UC1);
347  Mat featureVector2(nFeatures, 1, CV_8UC1);
348  Mat ePot;
349  word l;
350  for (int y = 0; y < m_size.height; y++) {
351 #endif
352  byte const **pFv1 = new const byte * [nFeatures];
353  for (word f = 0; f < nFeatures; f++) pFv1[f] = featureVectors[f].ptr<byte>(y);
354  byte const **pFv2 = NULL;
355  if (y > 0) {
356  pFv2 = new const byte *[nFeatures];
357  for (word f = 0; f < nFeatures; f++) pFv2[f] = featureVectors[f].ptr<byte>(y-1);
358  }
359 
360  for (int x = 0; x < m_size.width; x++) {
361  size_t idx = (y * m_size.width + x) * m_nLayers;
362 
363  for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = pFv1[f][x]; // featureVectors[x][y]
364 
365  if (m_gType & GRAPH_EDGES_LINK) {
366  ePot = linkTrainer->getLinkPotentials(featureVector1, linkWeight);
367  add(ePot, ePot.t(), ePot);
368  if (m_nLayers >= 2)
369  m_graph.setArc(idx, idx + 1, ePot);
370  ePot = CTrainEdge::getDefaultEdgePotentials(100, m_graph.getNumStates());
371  for (l = 2; l < m_nLayers; l++)
372  m_graph.setEdge(idx + l - 1, idx + l, ePot);
373  } // edges_link
374 
375  if (m_gType & GRAPH_EDGES_GRID) {
376  if (x > 0) {
377  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv1[f][x - 1]; // featureVectors[x-1][y]
378  ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
379  for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers, ePot);
380  } // if x
381 
382  if (y > 0) {
383  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[f][x]; // featureVectors[x][y-1]
384  ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
385  for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width, ePot);
386  } // if y
387  } // edges_grid
388 
389  if (m_gType & GRAPH_EDGES_DIAG) {
390  if ((x > 0) && (y > 0)) {
391  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[f][x - 1]; // featureVectors[x-1][y-1]
392  ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
393  for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width - m_nLayers, ePot);
394  } // if x, y
395 
396  if ((x < m_size.width - 1) && (y > 0)) {
397  for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[f][x + 1]; // featureVectors[x+1][y-1]
398  ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
399  for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width + m_nLayers, ePot);
400  } // x, y
401  } // edges_diag
402  } // x
403 #ifdef ENABLE_PPL
404  }); // y
405 #else
406  } // y
407 #endif
408  }
409 
	// Assigns group to every GRID / DIAG arc whose two endpoint pixels lie on
	// opposite sides of the line A*x + B*y + C = 0.
	// NOTE(review): only the base-layer arc (index i) of each neighbor pair is
	// re-grouped, not the arcs of higher layers — confirm this is intended.
	void CGraphLayeredExt::defineEdgeGroup(float A, float B, float C, byte group)
	{
		// Assertion
		DGM_ASSERT_MSG(A != 0 || B != 0, "Wrong arguments");	// A = B = 0 does not describe a line

#ifdef ENABLE_PPL
		concurrency::parallel_for(0, m_size.height, [&](int y) {
#else
		for (int y = 0; y < m_size.height; y++) {
#endif
		for (int x = 0; x < m_size.width; x++) {
			int i = (y * m_size.width + x) * m_nLayers; // index of the current node from the base layer
			int s = SIGN(A * x + B * y + C); // sign of the current pixel according to the given line

			if (m_gType & GRAPH_EDGES_GRID) {
				if (x > 0) {			// left neighbor
					int _x = x - 1;
					int _y = y;
					int _s = SIGN(A * _x + B * _y + C);
					if (s != _s) m_graph.setArcGroup(i, i - m_nLayers, group);	// the arc crosses the line
				} // if x
				if (y > 0) {			// upper neighbor
					int _x = x;
					int _y = y - 1;
					int _s = SIGN(A * _x + B * _y + C);
					if (s != _s) m_graph.setArcGroup(i, i - m_nLayers * m_size.width, group);
				} // if y
			}

			if (m_gType & GRAPH_EDGES_DIAG) {
				if ((x > 0) && (y > 0)) {	// upper-left neighbor
					int _x = x - 1;
					int _y = y - 1;
					int _s = SIGN(A * _x + B * _y + C);
					if (s != _s) m_graph.setArcGroup(i, i - m_nLayers * m_size.width - m_nLayers, group);
				} // if x, y
				if ((x < m_size.width - 1) && (y > 0)) {	// upper-right neighbor
					int _x = x + 1;
					int _y = y - 1;
					int _s = SIGN(A * _x + B * _y + C);
					if (s != _s) m_graph.setArcGroup(i, i - m_nLayers * m_size.width + m_nLayers, group);
				} // x, y
			}
		} // x
	} // y
#ifdef ENABLE_PPL
	);
#endif
	}
459 
460  void CGraphLayeredExt::setEdges(std::optional<byte> group, const Mat &pot)
461  {
462  if (false) {
463  for (int y = 0; y < m_size.height; y++) {
464  for (int x = 0; x < m_size.width; x++) {
465  int i = (y * m_size.width + x) * m_nLayers; // index of the current node from the base layer
466  if (m_gType & GRAPH_EDGES_GRID) {
467  if (x > 0) {
468  if (m_graph.getEdgeGroup(i, i - m_nLayers) == group)
469  m_graph.setArc(i, i - m_nLayers, pot);
470  }
471  if (y > 0) {
472  if (m_graph.getEdgeGroup(i, i - m_nLayers * m_size.width) == group)
473  m_graph.setArc(i, i - m_nLayers * m_size.width, pot);
474  }
475  }
476  if (m_gType & GRAPH_EDGES_DIAG) {
477  if ((x > 0) && (y > 0)) {
478  if (m_graph.getEdgeGroup(i, i - m_nLayers * m_size.width - m_nLayers) == group)
479  m_graph.setArc(i, i - m_nLayers * m_size.width - m_nLayers, pot);
480  }
481  if ((x < m_size.width - 1) && (y > 0)) {
482  if (m_graph.getEdgeGroup(i, i - m_nLayers * m_size.width + m_nLayers) == group)
483  m_graph.setArc(i, i - m_nLayers * m_size.width + m_nLayers, pot);
484  }
485  }
486  } // x
487  } // y
488  }
489  else {
490  Mat Pot;
491  sqrt(pot, Pot);
492  m_graph.setEdges(group, Pot);
493  }
494  }
495 }
virtual size_t addNode(const Mat &pot=EmptyMat)=0
Adds an additional node (with specified potentional)
byte getNumStates(void) const
Returns number of states (classes)
Definition: Graph.h:99
virtual void setEdges(std::optional< byte > group, const Mat &pot)=0
Sets the potential pot to all edges belonging to group group.
void addArc(size_t Node1, size_t Node2, const Mat &pot=EmptyMat)
Adds an additional udirected edge (arc) with specified potentional.
word getNumFeatures(void) const
Returns number of features.
Definition: ITrain.h:37
Vertical and horizontal edges.
void addFeatureVecs(CTrainEdge &edgeTrainer, const Mat &featureVectors, const Mat &gt)
Adds a block of new feature vectors.
Contrast-Sensitive Potts training class.
const word m_nLayers
Number of layers.
Base abstract class for edge potentials training.
Definition: TrainEdge.h:24
virtual size_t getNumNodes(void) const =0
Returns the number of nodes in the graph.
void addDefaultEdgesModel(float val, float weight=1.0f) override
Adds default data-independet edge model.
Base abstract class for link (inter-layer edge) potentials training.
Definition: TrainLink.h:17
void addEdge(size_t srcNode, size_t dstNode, const Mat &pot=EmptyMat)
Adds an additional directed edge with specified potentional.
void buildGraph(Size graphSize) override
Builds a 2D graph of size corresponding to the image resolution.
void setGraph(const Mat &pots) override
Fills an existing 2D graph with potentials or builds a new 2D graph of size corresponding to pots...
IGraphPairwise & m_graph
The graph.
const byte m_gType
Graph type (Ref. graphEdgesType)
virtual void reset(void)=0
Resets the graph.
virtual void addFeatureVecs(const Mat &featureVector1, byte gt1, const Mat &featureVector2, byte gt2)=0
Adds a pair of feature vectors.
void fillEdges(const CTrainEdge &edgeTrainer, const CTrainLink *linkTrainer, const Mat &featureVectors, const vec_float_t &vParams, float edgeWeight=1.0f, float linkWeight=1.0f)
Fills the graph edges with potentials.
static Mat getDefaultEdgePotentials(float val, byte nStates)
Returns the data-independent edge potentials.
Definition: TrainEdge.h:74