1 #include "GraphLayeredExt.h" 2 #include "GraphPairwise.h" 6 #include "TrainEdgePotts.h" 8 #include "TrainEdgePottsCS.h" 19 for (
int y = 0; y <
m_size.height; y++)
20 for (
int x = 0; x <
m_size.width; x++) {
45 for (
int y = 0; y <
m_size.height; y++) {
46 for (
int x = 0; x <
m_size.width; x++) {
49 if ((x > 0) && (y > 0))
53 if ((x < graphSize.width - 1) && (y > 0))
63 DGM_ASSERT_MSG(
m_nLayers == 1,
"When more than 1 layer is present, use CGraphLayeredExt::setGraph(const Mat&, const Mat&) function instead.");
69 if (weight != 1.0f) val = powf(val, weight);
78 const word nFeatures = featureVectors.channels();
80 fillEdges(edgeTrainer, NULL, featureVectors, { val, 0.001f }, weight);
87 const word nFeatures =
static_cast<word
>(featureVectors.size());
89 fillEdges(edgeTrainer, NULL, featureVectors, { val, 0.001f }, weight);
96 DGM_ASSERT(!potBase.empty());
97 DGM_ASSERT(CV_32F == potBase.depth());
98 if (!potOccl.empty()) {
99 DGM_ASSERT(potBase.size() == potOccl.size());
100 DGM_ASSERT(CV_32F == potOccl.depth());
103 DGM_ASSERT(
m_size.height == potBase.rows);
104 DGM_ASSERT(
m_size.width == potBase.cols);
107 byte nStatesBase =
static_cast<byte
>(potBase.channels());
108 byte nStatesOccl = potOccl.empty() ? 0 :
static_cast<byte
>(potOccl.channels());
109 if (
m_nLayers >= 2) DGM_ASSERT(nStatesOccl);
113 concurrency::parallel_for(0,
m_size.height, [&, nStatesBase, nStatesOccl](
int y) {
114 Mat nPotBase(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
115 Mat nPotOccl(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
116 Mat nPotIntr(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
117 for (byte s = 0; s < nStatesOccl; s++)
118 nPotIntr.at<float>(m_graph.getNumStates() - nStatesOccl + s, 0) = 100.0f / nStatesOccl;
120 Mat nPotBase(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
121 Mat nPotOccl(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
122 Mat nPotIntr(m_graph.getNumStates(), 1, CV_32FC1, Scalar(0.0f));
123 for (byte s = 0; s < nStatesOccl; s++)
124 nPotIntr.at<float>(m_graph.getNumStates() - nStatesOccl + s, 0) = 100.0f / nStatesOccl;
125 for (int y = 0; y < m_size.height; y++) {
127 const float *pPotBase = potBase.ptr<float>(y);
128 const float *pPotOccl = potOccl.empty() ? NULL : potOccl.ptr<float>(y);
129 for (int x = 0; x < m_size.width; x++) {
130 size_t idx = (y * m_size.width + x) * m_nLayers;
132 for (byte s = 0; s < nStatesBase; s++)
133 nPotBase.at<float>(s, 0) = pPotBase[nStatesBase * x + s];
134 m_graph.setNode(idx, nPotBase);
136 if (m_nLayers >= 2) {
137 for (byte s = 0; s < nStatesOccl; s++)
138 nPotOccl.at<float>(m_graph.getNumStates() - nStatesOccl + s, 0) = pPotOccl[nStatesOccl * x + s];
139 m_graph.setNode(idx + 1, nPotOccl);
142 for (word l = 2; l < m_nLayers; l++)
143 m_graph.setNode(idx + l, nPotIntr);
154 DGM_ASSERT_MSG(featureVectors.size() == gt.size(),
"The size of <featureVectors> does not correspond to the size of <gt>");
155 DGM_ASSERT_MSG(featureVectors.depth() == CV_8U,
"The argument <featureVectors> has wrong depth");
156 DGM_ASSERT_MSG(gt.type() == CV_8UC1,
"The argument <gt> has either wrong depth or more than one channel");
157 DGM_ASSERT_MSG(featureVectors.channels() == edgeTrainer.
getNumFeatures(),
158 "Number of features in the <featureVectors> (%d) does not correspond to the specified (%d)", featureVectors.channels(), edgeTrainer.
getNumFeatures());
160 const word nFeatures = featureVectors.channels();
162 Mat featureVector1(nFeatures, 1, CV_8UC1);
163 Mat featureVector2(nFeatures, 1, CV_8UC1);
165 for (
int y = 0; y < gt.rows; y++) {
166 const byte *pFV1 = featureVectors.ptr<byte>(y);
167 const byte *pFV2 = y > 0 ? featureVectors.ptr<byte>(y - 1) : NULL;
168 const byte *pGt1 = gt.ptr<byte>(y);
169 const byte *pGt2 = y > 0 ? gt.ptr<byte>(y - 1) : NULL;
170 for (
int x = 0; x < gt.cols; x++) {
171 for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = pFV1[nFeatures * x + f];
174 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV1[nFeatures * (x - 1) + f];
175 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt1[x - 1]);
176 edgeTrainer.
addFeatureVecs(featureVector2, pGt1[x - 1], featureVector1, pGt1[x]);
179 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV2[nFeatures * x + f];
180 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x]);
181 edgeTrainer.
addFeatureVecs(featureVector2, pGt2[x], featureVector1, pGt1[x]);
185 if ((x > 0) && (y > 0)) {
186 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV2[nFeatures * (x - 1) + f];
187 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x - 1]);
188 edgeTrainer.
addFeatureVecs(featureVector2, pGt2[x - 1], featureVector1, pGt1[x]);
190 if ((x < gt.cols - 1) && (y > 0)) {
191 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFV2[nFeatures * (x + 1) + f];
192 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x + 1]);
193 edgeTrainer.
addFeatureVecs(featureVector2, pGt2[x + 1], featureVector1, pGt1[x]);
203 DGM_ASSERT_MSG(featureVectors[0].size() == gt.size(),
"The size of <featureVectors> does not correspond to the size of <gt>");
204 DGM_ASSERT_MSG(featureVectors[0].type() == CV_8UC1,
"The argument <featureVectors> has either wrong depth or more than one channel");
205 DGM_ASSERT_MSG(gt.type() == CV_8UC1,
"The argument <gt> has either wrong depth or more than one channel");
206 DGM_ASSERT_MSG(featureVectors.size() == edgeTrainer.
getNumFeatures(),
207 "Number of features in the <featureVectors> (%zu) does not correspond to the specified (%d)", featureVectors.size(), edgeTrainer.
getNumFeatures());
209 const word nFeatures =
static_cast<word
>(featureVectors.size());
211 Mat featureVector1(nFeatures, 1, CV_8UC1);
212 Mat featureVector2(nFeatures, 1, CV_8UC1);
214 std::vector<const byte *> vFV1(nFeatures);
215 std::vector<const byte *> vFV2(nFeatures);
216 for (
int y = 0; y < gt.rows; y++) {
217 for (word f = 0; f < nFeatures; f++) {
218 vFV1[f] = featureVectors[f].ptr<byte>(y);
219 if (y > 0) vFV2[f] = featureVectors[f].ptr<byte>(y - 1);
221 const byte *pGt1 = gt.ptr<byte>(y);
222 const byte *pGt2 = y > 0 ? gt.ptr<byte>(y - 1) : NULL;
223 for (
int x = 0; x < gt.cols; x++) {
224 for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = vFV1[f][x];
227 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV1[f][x - 1];
228 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt1[x - 1]);
229 edgeTrainer.
addFeatureVecs(featureVector2, pGt1[x - 1], featureVector1, pGt1[x]);
232 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV2[f][x];
233 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x]);
234 edgeTrainer.
addFeatureVecs(featureVector2, pGt2[x], featureVector1, pGt1[x]);
238 if ((x > 0) && (y > 0)) {
239 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV2[f][x - 1];
240 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x - 1]);
241 edgeTrainer.
addFeatureVecs(featureVector2, pGt2[x - 1], featureVector1, pGt1[x]);
243 if ((x < gt.cols - 1) && (y > 0)) {
244 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = vFV2[f][x + 1];
245 edgeTrainer.
addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x + 1]);
246 edgeTrainer.
addFeatureVecs(featureVector2, pGt2[x + 1], featureVector1, pGt1[x]);
255 const word nFeatures = featureVectors.channels();
258 DGM_ASSERT(
m_size.height == featureVectors.rows);
259 DGM_ASSERT(
m_size.width == featureVectors.cols);
261 if (linkTrainer) DGM_ASSERT(nFeatures == linkTrainer->
getNumFeatures());
265 concurrency::parallel_for(0,
m_size.height, [&, nFeatures](
int y) {
266 Mat featureVector1(nFeatures, 1, CV_8UC1);
267 Mat featureVector2(nFeatures, 1, CV_8UC1);
271 Mat featureVector1(nFeatures, 1, CV_8UC1);
272 Mat featureVector2(nFeatures, 1, CV_8UC1);
275 for (int y = 0; y < m_size.height; y++) {
277 const byte *pFv1 = featureVectors.ptr<byte>(y);
278 const byte *pFv2 = (y > 0) ? featureVectors.ptr<byte>(y - 1) : NULL;
279 for (int x = 0; x < m_size.width; x++) {
280 size_t idx = (y * m_size.width + x) * m_nLayers;
281 for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = pFv1[nFeatures * x + f];
283 if (m_gType & GRAPH_EDGES_LINK) {
284 ePot = linkTrainer->getLinkPotentials(featureVector1, linkWeight);
285 add(ePot, ePot.t(), ePot);
287 m_graph.setArc(idx, idx + 1, ePot);
288 ePot = CTrainEdge::getDefaultEdgePotentials(100, m_graph.getNumStates());
289 for (l = 2; l < m_nLayers; l++)
290 m_graph.setEdge(idx + l - 1, idx + l, ePot);
293 if (m_gType & GRAPH_EDGES_GRID) {
295 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv1[nFeatures * (x - 1) + f];
296 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
297 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers, ePot);
301 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[nFeatures * x + f];
302 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
303 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width, ePot);
307 if (m_gType & GRAPH_EDGES_DIAG) {
308 if ((x > 0) && (y > 0)) {
309 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[nFeatures * (x - 1) + f];
310 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
311 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width - m_nLayers, ePot);
314 if ((x < m_size.width - 1) && (y > 0)) {
315 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[nFeatures * (x + 1) + f];
316 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
317 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width + m_nLayers, ePot);
328 void CGraphLayeredExt::fillEdges(
const CTrainEdge& edgeTrainer,
const CTrainLink* linkTrainer,
const vec_mat_t& featureVectors,
const vec_float_t& vParams,
float edgeWeight,
float linkWeight)
330 const word nFeatures =
static_cast<word
>(featureVectors.size());
333 DGM_ASSERT(m_size.height == featureVectors[0].rows);
334 DGM_ASSERT(m_size.width == featureVectors[0].cols);
336 if (linkTrainer) DGM_ASSERT(nFeatures == linkTrainer->
getNumFeatures());
337 DGM_ASSERT(m_size.width * m_size.height * m_nLayers == m_graph.getNumNodes());
340 concurrency::parallel_for(0, m_size.height, [&, nFeatures](
int y) {
341 Mat featureVector1(nFeatures, 1, CV_8UC1);
342 Mat featureVector2(nFeatures, 1, CV_8UC1);
346 Mat featureVector1(nFeatures, 1, CV_8UC1);
347 Mat featureVector2(nFeatures, 1, CV_8UC1);
350 for (int y = 0; y < m_size.height; y++) {
352 byte const **pFv1 = new const byte * [nFeatures];
353 for (word f = 0; f < nFeatures; f++) pFv1[f] = featureVectors[f].ptr<byte>(y);
354 byte const **pFv2 = NULL;
356 pFv2 = new const byte *[nFeatures];
357 for (word f = 0; f < nFeatures; f++) pFv2[f] = featureVectors[f].ptr<byte>(y-1);
360 for (int x = 0; x < m_size.width; x++) {
361 size_t idx = (y * m_size.width + x) * m_nLayers;
363 for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = pFv1[f][x];
365 if (m_gType & GRAPH_EDGES_LINK) {
366 ePot = linkTrainer->getLinkPotentials(featureVector1, linkWeight);
367 add(ePot, ePot.t(), ePot);
369 m_graph.setArc(idx, idx + 1, ePot);
370 ePot = CTrainEdge::getDefaultEdgePotentials(100, m_graph.getNumStates());
371 for (l = 2; l < m_nLayers; l++)
372 m_graph.setEdge(idx + l - 1, idx + l, ePot);
375 if (m_gType & GRAPH_EDGES_GRID) {
377 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv1[f][x - 1];
378 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
379 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers, ePot);
383 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[f][x];
384 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
385 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width, ePot);
389 if (m_gType & GRAPH_EDGES_DIAG) {
390 if ((x > 0) && (y > 0)) {
391 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[f][x - 1];
392 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
393 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width - m_nLayers, ePot);
396 if ((x < m_size.width - 1) && (y > 0)) {
397 for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[f][x + 1];
398 ePot = edgeTrainer.getEdgePotentials(featureVector1, featureVector2, vParams, edgeWeight);
399 for (word l = 0; l < m_nLayers; l++) m_graph.setArc(idx + l, idx + l - m_nLayers * m_size.width + m_nLayers, ePot);
410 void CGraphLayeredExt::defineEdgeGroup(
float A,
float B,
float C, byte group)
413 DGM_ASSERT_MSG(A != 0 || B != 0,
"Wrong arguments");
416 concurrency::parallel_for(0, m_size.height, [&](
int y) {
418 for (int y = 0; y < m_size.height; y++) {
420 for (int x = 0; x < m_size.width; x++) {
421 int i = (y * m_size.width + x) * m_nLayers;
422 int s = SIGN(A * x + B * y + C);
424 if (m_gType & GRAPH_EDGES_GRID) {
428 int _s = SIGN(A * _x + B * _y + C);
429 if (s != _s) m_graph.setArcGroup(i, i - m_nLayers, group);
434 int _s = SIGN(A * _x + B * _y + C);
435 if (s != _s) m_graph.setArcGroup(i, i - m_nLayers * m_size.width, group);
439 if (m_gType & GRAPH_EDGES_DIAG) {
440 if ((x > 0) && (y > 0)) {
443 int _s = SIGN(A * _x + B * _y + C);
444 if (s != _s) m_graph.setArcGroup(i, i - m_nLayers * m_size.width - m_nLayers, group);
446 if ((x < m_size.width - 1) && (y > 0)) {
449 int _s = SIGN(A * _x + B * _y + C);
450 if (s != _s) m_graph.setArcGroup(i, i - m_nLayers * m_size.width + m_nLayers, group);
460 void CGraphLayeredExt::setEdges(std::optional<byte> group,
const Mat &pot)
463 for (
int y = 0; y < m_size.height; y++) {
464 for (
int x = 0; x < m_size.width; x++) {
465 int i = (y * m_size.width + x) * m_nLayers;
468 if (m_graph.getEdgeGroup(i, i - m_nLayers) == group)
469 m_graph.setArc(i, i - m_nLayers, pot);
472 if (m_graph.getEdgeGroup(i, i - m_nLayers * m_size.width) == group)
473 m_graph.setArc(i, i - m_nLayers * m_size.width, pot);
477 if ((x > 0) && (y > 0)) {
478 if (m_graph.getEdgeGroup(i, i - m_nLayers * m_size.width - m_nLayers) == group)
479 m_graph.setArc(i, i - m_nLayers * m_size.width - m_nLayers, pot);
481 if ((x < m_size.width - 1) && (y > 0)) {
482 if (m_graph.getEdgeGroup(i, i - m_nLayers * m_size.width + m_nLayers) == group)
483 m_graph.setArc(i, i - m_nLayers * m_size.width + m_nLayers, pot);
492 m_graph.setEdges(group, Pot);
Links (inter-layer edges)
virtual size_t addNode(const Mat &pot=EmptyMat)=0
Adds an additional node (with the specified potential)
byte getNumStates(void) const
Returns number of states (classes)
virtual void setEdges(std::optional< byte > group, const Mat &pot)=0
Sets the potential pot to all edges belonging to group group.
void addArc(size_t Node1, size_t Node2, const Mat &pot=EmptyMat)
Adds an additional undirected edge (arc) with the specified potential.
word getNumFeatures(void) const
Returns number of features.
Vertical and horizontal edges.
void addFeatureVecs(CTrainEdge &edgeTrainer, const Mat &featureVectors, const Mat &gt)
Adds a block of new feature vectors.
Contrast-Sensitive Potts training class.
const word m_nLayers
Number of layers.
Base abstract class for edge potentials training.
Size m_size
Size of the graph.
virtual size_t getNumNodes(void) const =0
Returns the number of nodes in the graph.
void addDefaultEdgesModel(float val, float weight=1.0f) override
Adds default data-independent edge model.
Base abstract class for link (inter-layer edge) potentials training.
void addEdge(size_t srcNode, size_t dstNode, const Mat &pot=EmptyMat)
Adds an additional directed edge with the specified potential.
void buildGraph(Size graphSize) override
Builds a 2D graph of size corresponding to the image resolution.
void setGraph(const Mat &pots) override
Fills an existing 2D graph with potentials or builds a new 2D graph of size corresponding to pots...
IGraphPairwise & m_graph
The graph.
const byte m_gType
Graph type (Ref. graphEdgesType)
virtual void reset(void)=0
Resets the graph.
virtual void addFeatureVecs(const Mat &featureVector1, byte gt1, const Mat &featureVector2, byte gt2)=0
Adds a pair of feature vectors.
void fillEdges(const CTrainEdge &edgeTrainer, const CTrainLink *linkTrainer, const Mat &featureVectors, const vec_float_t &vParams, float edgeWeight=1.0f, float linkWeight=1.0f)
Fills the graph edges with potentials.
static Mat getDefaultEdgePotentials(float val, byte nStates)
Returns the data-independent edge potentials.