Learning OpenCV Lecture 4 (Transforming Images with Morphological Operations)

In this chapter, we will cover:
  • Eroding and dilating images using morphological filters
  • Opening and closing images using morphological filters
  • Detecting edges and corners using morphological filters
  • Segmenting images using watersheds (the watershed algorithm)
  • Extracting foreground objects with the GrabCut algorithm
Eroding, dilating, opening, and closing
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
      // Read input image
     cv::Mat image= cv::imread("../binary.bmp" );
      if (!image.data)
            return 0;

    // Display the image
     cv::namedWindow( "Image");
     cv::imshow( "Image",image);

      // Erode the image
     cv::Mat eroded;
     cv::erode(image,eroded,cv::Mat());

    // Display the eroded image
     cv::namedWindow( "Eroded Image");
     cv::imshow( "Eroded Image",eroded);

      // Dilate the image
     cv::Mat dilated;
     cv::dilate(image,dilated,cv::Mat());

    // Display the dilated image
     cv::namedWindow( "Dilated Image");
     cv::imshow( "Dilated Image",dilated);

      // Erode the image with a larger structuring element
     cv::Mat element(7,7,CV_8U,cv::Scalar(1));
     cv::erode(image,eroded,element);

    // Display the eroded image
     cv::namedWindow( "Eroded Image (7x7)");
     cv::imshow( "Eroded Image (7x7)",eroded);

      // Erode the image 3 times.
     cv::erode(image,eroded,cv::Mat(),cv::Point(-1,-1),3);

    // Display the eroded image
     cv::namedWindow( "Eroded Image (3 times)");
     cv::imshow( "Eroded Image (3 times)",eroded);

      // Close the image
     cv::Mat element5(5,5,CV_8U,cv::Scalar(1));
     cv::Mat closed;
     cv::morphologyEx(image,closed,cv::MORPH_CLOSE,element5);

    // Display the closed image
     cv::namedWindow( "Closed Image");
     cv::imshow( "Closed Image",closed);

      // Open the image
     cv::Mat opened;
     cv::morphologyEx(image,opened,cv::MORPH_OPEN,element5);

    // Display the opened image
     cv::namedWindow( "Opened Image");
     cv::imshow( "Opened Image",opened);

      // Close and Open the image
     cv::morphologyEx(image,image,cv::MORPH_CLOSE,element5);
     cv::morphologyEx(image,image,cv::MORPH_OPEN,element5);

    // Display the closed-then-opened image
     cv::namedWindow( "Closed and Opened Image");
     cv::imshow( "Closed and Opened Image",image);
     cv::imwrite( "binaryGroup.bmp",image);

      // Read input image
     image= cv::imread("../binary.bmp");

      // Open and Close the image
     cv::morphologyEx(image,image,cv::MORPH_OPEN,element5);
     cv::morphologyEx(image,image,cv::MORPH_CLOSE,element5);

    // Display the opened-then-closed image
     cv::namedWindow( "Opened and Closed Image");
     cv::imshow( "Opened and Closed Image",image);

     cv::waitKey();
      return 0;
}
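
As a side note (not part of the original recipe), the structuring elements above are built by hand as cv::Mat objects filled with 1s. OpenCV also provides cv::getStructuringElement for the common rectangular, cross-shaped, and elliptical kernels; a minimal sketch, reusing ../binary.bmp from the listing above:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
      // Read input image (same file as the listing above)
      cv::Mat image = cv::imread("../binary.bmp");
      if (!image.data)
            return 0;

      // Ready-made structuring elements
      cv::Mat ellipse = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7));
      cv::Mat crossEl = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(7, 7));

      // Erode with the elliptical element, dilate with the cross-shaped one
      cv::Mat eroded, dilated;
      cv::erode(image, eroded, ellipse);
      cv::dilate(image, dilated, crossEl);

      cv::namedWindow("Eroded (ellipse 7x7)");
      cv::imshow("Eroded (ellipse 7x7)", eroded);
      cv::namedWindow("Dilated (cross 7x7)");
      cv::imshow("Dilated (cross 7x7)", dilated);
      cv::waitKey();
      return 0;
}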

  Results of the erode/dilate/open/close listing: (screenshots omitted)

Detecting edges and corners using morphological filters
morphoFeatures.h
#if !defined MORPHOF
#define MORPHOF

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

class MorphoFeatures {
private:
      // threshold to produce binary image
      int threshold;
      // structuring elements used in corner detection
     cv::Mat cross;
     cv::Mat diamond;
     cv::Mat square;
     cv::Mat x;

public:

     MorphoFeatures() : threshold(-1),
                 cross(5, 5, CV_8U, cv::Scalar(0)),
                 diamond(5, 5, CV_8U, cv::Scalar(1)),  // all 1s; corners cleared below
                 square(5, 5, CV_8U, cv::Scalar(1)),   // full 5x5 block of 1s
                 x(5, 5, CV_8U, cv::Scalar(0)) {

            // Creating the cross-shaped structuring element
            for (int i = 0; i < 5; i++) {
                 cross.at<uchar>(2, i) = 1;
                 cross.at<uchar>(i, 2) = 1;
            }

            // Creating the diamond-shaped structuring element
            diamond.at<uchar>(0, 0) = 0;
            diamond.at<uchar>(0, 1) = 0;
            diamond.at<uchar>(1, 0) = 0;
            diamond.at<uchar>(4, 4) = 0;
            diamond.at<uchar>(3, 4) = 0;
            diamond.at<uchar>(4, 3) = 0;
            diamond.at<uchar>(4, 0) = 0;
            diamond.at<uchar>(4, 1) = 0;
            diamond.at<uchar>(3, 0) = 0;
            diamond.at<uchar>(0, 4) = 0;
            diamond.at<uchar>(0, 3) = 0;
            diamond.at<uchar>(1, 4) = 0;

            // Creating the x-shaped structuring element
            for (int i = 0; i < 5; i++) {
                 x.at<uchar>(i, i) = 1;
                 x.at<uchar>(4 - i, i) = 1;
            }
     }

      void setThreshold(int t) {
            if (t > 0)
                 threshold = t;
     }

      int getThreshold() const {
            return threshold;
     }

     cv::Mat getEdges(const cv::Mat &image) {
            // Get the gradient image
            cv::Mat result;
            cv::morphologyEx(image, result, cv::MORPH_GRADIENT, cv::Mat());

            // Apply threshold to obtain a binary image
            applyThreshold(result);
            return result;
     }

      void applyThreshold(cv::Mat &result) {
            // Apply threshold on result
            if (threshold > 0) {
                 cv::threshold(result, result, threshold, 255, cv::THRESH_BINARY_INV);
            }
     }

     cv::Mat getCorners(const cv::Mat &image) {

            cv::Mat result;

            // Dilate with a cross
            cv::dilate(image, result, cross);

            // Erode with a diamond
            cv::erode(result, result, diamond);

            cv::Mat result2;
            // Dilate with a x
            cv::dilate(image, result2, x);

            // Erode with a square
            cv::erode(result2, result2, square);

            // Corners are obtained by differencing
            // the two closed images
            cv::absdiff(result2, result, result);

            // Apply threshold to obtain a binary image
            applyThreshold(result);

            return result;
     }

      void drawOnImage(const cv::Mat &binary, cv::Mat &image) {
            cv::Mat_<uchar>::const_iterator it = binary.begin<uchar>();
            cv::Mat_<uchar>::const_iterator itend = binary.end<uchar>();

            // for each pixel
            for (int i = 0; it != itend; ++it, ++i) {
                 if (!*it) {
                      cv::circle(image,
                            cv::Point(i % image.cols, i / image.cols),
                            5, cv::Scalar(255, 0, 0));
                 }
            }
     }

};

#endif
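
For reference, the edge detector in getEdges relies on the morphological gradient, which is simply the dilated image minus the eroded image. The short sketch below (an addition, not from the original post) computes the same thing explicitly with the default 3x3 element:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Morphological gradient computed by hand; for the same structuring
// element this matches cv::morphologyEx(..., cv::MORPH_GRADIENT, ...).
cv::Mat manualGradient(const cv::Mat &image)
{
      cv::Mat dilated, eroded, gradient;
      cv::dilate(image, dilated, cv::Mat());      // local maximum (3x3)
      cv::erode(image, eroded, cv::Mat());        // local minimum (3x3)
      cv::subtract(dilated, eroded, gradient);    // difference = edge strength
      return gradient;
}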

  morph.cpp

#include <iostream>

#include "morphoFeatures.h"

int main() {

     cv::Mat image = cv::imread( "../building.jpg");
      if (!image.data)
            return 0;
      // Convert to grayscale (the morphological edge/corner detection works on one channel)
     cv::cvtColor(image, image, CV_BGR2GRAY);

      // Create the morphological features instance
     MorphoFeatures morpho;
     morpho.setThreshold(40);

      // Get the edges
     cv::Mat edges;
     edges = morpho.getEdges(image);

     cv::namedWindow( "Edges Image", CV_WINDOW_AUTOSIZE);
     cv::imshow( "Edges Image", edges);

      // Get the corners
     cv::Mat corners;
     corners = morpho.getCorners(image);
      // Display the corner on the image
     morpho.drawOnImage(corners, image);
     cv::namedWindow( "Corners on Image", CV_WINDOW_AUTOSIZE);
     cv::imshow( "Corners on Image", image);

     cv::waitKey(0);

      return 0;

}
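
Beyond gradient, opening, and closing, cv::morphologyEx also offers the top-hat and black-hat operators, which extract small bright or dark details. A hedged sketch (an addition, reusing ../building.jpg and the 5x5 element from earlier):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
      // Load as grayscale
      cv::Mat image = cv::imread("../building.jpg", 0);
      if (!image.data)
            return 0;

      cv::Mat element5(5, 5, CV_8U, cv::Scalar(1));

      // Top-hat: image minus its opening -> bright details smaller than the element
      cv::Mat tophat;
      cv::morphologyEx(image, tophat, cv::MORPH_TOPHAT, element5);

      // Black-hat: closing minus the image -> dark details smaller than the element
      cv::Mat blackhat;
      cv::morphologyEx(image, blackhat, cv::MORPH_BLACKHAT, element5);

      cv::namedWindow("Top-hat");
      cv::imshow("Top-hat", tophat);
      cv::namedWindow("Black-hat");
      cv::imshow("Black-hat", blackhat);
      cv::waitKey();
      return 0;
}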

  Results of morph.cpp (edges and corners): (screenshots omitted)

Segmenting images using watersheds
watershedSegment.h
#if !defined WATERSHS
#define WATERSHS

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

class WatershedSegmenter {
private:

     cv::Mat markers;

public:

      void setMarkers(const cv::Mat &markerImage) {
           
            // Convert to image of ints
            markerImage.convertTo(markers, CV_32S);
     }

     cv::Mat process(const cv::Mat &image) {

            // Apply watershed
            cv::watershed(image, markers);

            return markers;
     }

      // Return result in the form of an image
     cv::Mat getSegmentation() {

            cv::Mat tmp;
            // all segments with a label above 255
            // saturate to 255 in the 8-bit conversion
            markers.convertTo(tmp, CV_8U);

            return tmp;
     }

      // Return watershed in the form of an image
     cv::Mat getWatersheds() {
           
            cv::Mat tmp;
            // Each pixel p is transformed into 255*p + 255 before
            // the 8-bit conversion, so boundary pixels (label -1)
            // become 0 and all other labels saturate to 255
            markers.convertTo(tmp, CV_8U, 255, 255);

            return tmp;
     }
};

#endif

  

// Read input image
     cv::Mat image = cv::imread( "../group.jpg");
      if (!image.data) {
            return 0;
     }

      // Display the image
     cv::namedWindow( "Original Image");
     cv::imshow( "Original Image", image);

      // Get the binary image
     cv::Mat binary;
     binary = cv::imread( "../binary.bmp", 0);

      // Display the binary image
     cv::namedWindow( "Binary Image");
     cv::imshow( "Binary Image", binary);

      // Eliminate noise and smaller objects
     cv::Mat fg;
     cv::erode(binary, fg, cv::Mat(), cv::Point(-1, -1), 6);

      // Display the foreground image
     cv::namedWindow( "Foreground Image");
     cv::imshow( "Foreground Image", fg);

  Results: (screenshot omitted)

// Identify image pixels without objects
     cv::Mat bg;
     cv::dilate(binary, bg, cv::Mat(), cv::Point(-1, -1), 6);
     cv::threshold(bg, bg, 1, 128, cv::THRESH_BINARY_INV);

      // Display the background image
     cv::namedWindow( "Background Image");
     cv::imshow( "Background Image", bg);

  Results: (screenshot omitted)

// Show markers image
     // (255 = foreground seeds, 128 = background, 0 = unknown regions)
     cv::Mat markers(binary.size(), CV_8U, cv::Scalar(0));
     markers = fg + bg;
     cv::namedWindow( "Markers");
     cv::imshow( "Markers", markers);

  

// Create watershed segmentation object
     WatershedSegmenter segmenter;

      // Set markers and process
     segmenter.setMarkers(markers);
     segmenter.process(image);

      // Display segmentation result
     cv::namedWindow( "Segmentation");
     cv::imshow( "Segmentation", segmenter.getSegmentation());

      // Display watersheds
     cv::namedWindow( "Watershed");
     cv::imshow( "Watershed", segmenter.getWatersheds());

  

// Open another image------------------------------------
     image = cv::imread( "../tower.jpg");

      // Identify background pixels
     cv::Mat imageMask(image.size(), CV_8U, cv::Scalar(0));
     cv::rectangle(imageMask, cv::Point(5, 5), cv::Point(image.cols - 5, image.rows - 5), cv::Scalar(255), 3);
      // Identify foreground pixels (in the middle of the image)
     cv::rectangle(imageMask, cv::Point(image.cols / 2 - 10, image.rows / 2 - 10),
                                      cv::Point(image.cols / 2 + 10, image.rows / 2 + 10), cv::Scalar(1), 10);

      // Set markers and process
     segmenter.setMarkers(imageMask);
     segmenter.process(image);

      // Display the image with markers
     cv::rectangle(image, cv::Point(5, 5), cv::Point(image.cols - 5, image.rows - 5), cv::Scalar(255, 255, 255), 3);
     cv::rectangle(image, cv::Point(image.cols / 2 - 10, image.rows / 2 - 10),
                                 cv::Point(image.cols / 2 + 10, image.rows / 2 + 10), cv::Scalar(1, 1, 1), 10);
     cv::namedWindow( "Image with marker");
     cv::imshow( "Image with marker", image);

      // Display watersheds
     cv::namedWindow( "Watersheds of foreground object");
     cv::imshow( "Watersheds of foreground object", segmenter.getWatersheds());

  Results of the watershed segmentation: (screenshots omitted)

Extracting foreground objects with the GrabCut algorithm
// Open another image
     image = cv::imread( "../tower.jpg");

      // define bounding rectangle
     cv::Rect rectangle(50, 70, image.cols - 150, image.rows - 180);

     cv::Mat result;  // segmentation result (4 possible values)
     cv::Mat bgModel, fgModel; // the models (internally used)
      // GrabCut segmentation
     cv::grabCut(image,          // input image
            result,                     // segmentation result
            rectangle,            // rectangle containing foreground
            bgModel, fgModel,     // models
            1,                          //number of iterations
            cv::GC_INIT_WITH_RECT       // use rectangle initialization
            );

      // Get the pixels marked as likely foreground
     cv::compare(result, cv::GC_PR_FGD, result, cv::CMP_EQ);
      // Generate output image
     cv::Mat foreground(image.size(), CV_8UC3, cv::Scalar(255, 255, 255));
     image.copyTo(foreground, result); // bg pixels not copied

      // draw rectangle on original image
     cv::rectangle(image, rectangle, cv::Scalar(255,255,255),1);
     cv::namedWindow( "Image");
     cv::imshow( "Image",image);

      // display result
     cv::namedWindow( "Segmented Image");
     cv::imshow( "Segmented Image",foreground);

  

// Open another image
     image= cv::imread("../group.jpg");

      // define bounding rectangle
     cv::Rect rectangle2(10,100,380,180);

     cv::Mat bkgModel,fgrModel; // the models (internally used)
      // GrabCut segmentation
     cv::grabCut(image,   // input image
            result, // segmentation result
           rectangle2,bkgModel,fgrModel,5,cv::GC_INIT_WITH_RECT);
      // Get the pixels marked as foreground or likely foreground
      //   cv::compare(result,cv::GC_PR_FGD,result,cv::CMP_EQ);
      // GC_FGD (1) and GC_PR_FGD (3) both have the lowest bit set,
      // so ANDing with 1 keeps definite and probable foreground
     result= result&1;
     foreground.create(image.size(),CV_8UC3);
     foreground.setTo(cv::Scalar(255,255,255));
     image.copyTo(foreground,result); // bg pixels not copied

      // draw rectangle on original image
     cv::rectangle(image, rectangle2, cv::Scalar(255,255,255),1);
     cv::namedWindow( "Image 2");
     cv::imshow( "Image 2",image);

      // display result
     cv::namedWindow( "Foreground objects");
     cv::imshow( "Foreground objects",foreground);

  
