[openrtm-commit:02366] r181 - in trunk/ImageProcessing/opencv/components: Affine/include/Affine Affine/src BackGroundSubtractionSimple/include/BackGroundSubtractionSimple BackGroundSubtractionSimple/src Binarization/include/Binarization Binarization/src CameraViewer/include/CameraViewer CameraViewer/src Chromakey/include/Chromakey Chromakey/src Dilationerosion/include/DilationErosion Dilationerosion/src Edge/include/Edge Edge/src Findcontour/include/Findcontour Findcontour/src Flip/include/Flip Flip/src Histogram/include/Histogram Histogram/src Houghline/include/Hough Houghline/src ImageCalibration/include/ImageCalibration ImageCalibration/src ImageSubstraction/include/ImageSubstraction ImageSubstraction/src MFCamera/include/MFCamera MFCamera/src ObjectTracking/include/ObjectTracking ObjectTracking/src OpenCVCamera/include/OpenCVCamera OpenCVCamera/src Perspective/include/Perspective Perspective/src RockPaperScissors/include/RockPaperScissors RockPaperScissors/src Rotate/include/Rotate Rotate/src Scale/include/Scale Scale/src Sepia/include/Sepia Sepia/src SubStractCaptureImage/include/SubStractCaptureImage SubStractCaptureImage/src Template/include/Template Template/src Translate/include/Translate Translate/src
openrtm @ openrtm.org
openrtm @ openrtm.org
2017年 2月 2日 (木) 09:05:08 JST
Author: miyamoto
Date: 2017-02-02 09:05:08 +0900 (Thu, 02 Feb 2017)
New Revision: 181
Modified:
trunk/ImageProcessing/opencv/components/Affine/include/Affine/Affine.h
trunk/ImageProcessing/opencv/components/Affine/src/Affine.cpp
trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/include/BackGroundSubtractionSimple/BackGroundSubtractionSimple.h
trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/src/BackGroundSubtractionSimple.cpp
trunk/ImageProcessing/opencv/components/Binarization/include/Binarization/Binarization.h
trunk/ImageProcessing/opencv/components/Binarization/src/Binarization.cpp
trunk/ImageProcessing/opencv/components/CameraViewer/include/CameraViewer/CameraViewer.h
trunk/ImageProcessing/opencv/components/CameraViewer/src/CameraViewer.cpp
trunk/ImageProcessing/opencv/components/Chromakey/include/Chromakey/Chromakey.h
trunk/ImageProcessing/opencv/components/Chromakey/src/Chromakey.cpp
trunk/ImageProcessing/opencv/components/Dilationerosion/include/DilationErosion/DilationErosion.h
trunk/ImageProcessing/opencv/components/Dilationerosion/src/DilationErosion.cpp
trunk/ImageProcessing/opencv/components/Edge/include/Edge/Edge.h
trunk/ImageProcessing/opencv/components/Edge/src/Edge.cpp
trunk/ImageProcessing/opencv/components/Findcontour/include/Findcontour/Findcontour.h
trunk/ImageProcessing/opencv/components/Findcontour/src/Findcontour.cpp
trunk/ImageProcessing/opencv/components/Flip/include/Flip/Flip.h
trunk/ImageProcessing/opencv/components/Flip/src/Flip.cpp
trunk/ImageProcessing/opencv/components/Histogram/include/Histogram/Histogram.h
trunk/ImageProcessing/opencv/components/Histogram/src/Histogram.cpp
trunk/ImageProcessing/opencv/components/Houghline/include/Hough/Hough.h
trunk/ImageProcessing/opencv/components/Houghline/src/Hough.cpp
trunk/ImageProcessing/opencv/components/ImageCalibration/include/ImageCalibration/CalibrationServiceSVC_impl.h
trunk/ImageProcessing/opencv/components/ImageCalibration/src/CalibrationServiceSVC_impl.cpp
trunk/ImageProcessing/opencv/components/ImageCalibration/src/ImageCalibration.cpp
trunk/ImageProcessing/opencv/components/ImageSubstraction/include/ImageSubstraction/ImageSubstraction.h
trunk/ImageProcessing/opencv/components/ImageSubstraction/src/ImageSubstraction.cpp
trunk/ImageProcessing/opencv/components/MFCamera/include/MFCamera/MFCapture.h
trunk/ImageProcessing/opencv/components/MFCamera/src/MFCamera.cpp
trunk/ImageProcessing/opencv/components/MFCamera/src/MFCapture.cpp
trunk/ImageProcessing/opencv/components/ObjectTracking/include/ObjectTracking/ObjectTracking.h
trunk/ImageProcessing/opencv/components/ObjectTracking/src/ObjectTracking.cpp
trunk/ImageProcessing/opencv/components/OpenCVCamera/include/OpenCVCamera/OpenCVCamera.h
trunk/ImageProcessing/opencv/components/OpenCVCamera/src/OpenCVCamera.cpp
trunk/ImageProcessing/opencv/components/Perspective/include/Perspective/Perspective.h
trunk/ImageProcessing/opencv/components/Perspective/src/Perspective.cpp
trunk/ImageProcessing/opencv/components/RockPaperScissors/include/RockPaperScissors/RockPaperScissors.h
trunk/ImageProcessing/opencv/components/RockPaperScissors/src/RockPaperScissors.cpp
trunk/ImageProcessing/opencv/components/Rotate/include/Rotate/Rotate.h
trunk/ImageProcessing/opencv/components/Rotate/src/Rotate.cpp
trunk/ImageProcessing/opencv/components/Scale/include/Scale/Scale.h
trunk/ImageProcessing/opencv/components/Scale/src/Scale.cpp
trunk/ImageProcessing/opencv/components/Sepia/include/Sepia/Sepia.h
trunk/ImageProcessing/opencv/components/Sepia/src/Sepia.cpp
trunk/ImageProcessing/opencv/components/SubStractCaptureImage/include/SubStractCaptureImage/SubStractCaptureImage.h
trunk/ImageProcessing/opencv/components/SubStractCaptureImage/src/SubStractCaptureImage.cpp
trunk/ImageProcessing/opencv/components/Template/include/Template/Template.h
trunk/ImageProcessing/opencv/components/Template/src/Template.cpp
trunk/ImageProcessing/opencv/components/Translate/include/Translate/Translate.h
trunk/ImageProcessing/opencv/components/Translate/src/Translate.cpp
Log:
OpenCV3 support. refs #3826
Modified: trunk/ImageProcessing/opencv/components/Affine/include/Affine/Affine.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Affine/include/Affine/Affine.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Affine/include/Affine/Affine.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,10 +20,9 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用インクルードファイルのインクルード */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
+
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -280,11 +279,11 @@
// </rtc-template>
- CvMat *m_affineMatrix;
- IplImage* m_image_buff; // Original Image
- IplImage* m_image_dest; // 結果出力用IplImage
+ cv::Mat m_image_buff; // Original Image
+ cv::Mat m_image_dest; // 結果出力用IplImage
+
int m_in_height; // 入力イメージのHeight
int m_in_width; // 入力イメージのWidth
Modified: trunk/ImageProcessing/opencv/components/Affine/src/Affine.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Affine/src/Affine.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Affine/src/Affine.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -147,27 +147,30 @@
RTC::ReturnCode_t Affine::onActivated(RTC::UniqueId ec_id)
{
/* イメージ用メモリの確保 */
- m_affineMatrix = cvCreateMat( 2, 3, CV_32FC1);
m_in_height = 0;
m_in_width = 0;
- m_image_buff = NULL;
- m_image_dest = NULL;
+
return RTC::RTC_OK;
}
RTC::ReturnCode_t Affine::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
- cvReleaseMat(&m_affineMatrix);
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
+ }
+ if (!m_image_dest.empty())
+ {
+ m_image_dest.release();
+ }
+
+
return RTC::RTC_OK;
}
@@ -189,49 +192,55 @@
m_in_height = m_image_orig.height;
m_in_width = m_image_orig.width;
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
+
/* サイズ変換のためTempメモリーを用意する */
- m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_image_dest = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
+ m_image_buff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_dest.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+
}
/* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
+ memcpy(m_image_buff.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
+ cv::Mat_<double> m_affineMatrix(2, 3);
/* 変換後の座標を設定する */
+
// Check configuration validations
if(isConfigurationValidated())
{
- cvmSet(m_affineMatrix, 0, 0, m_ve2dbMatrix[0][0]);
- cvmSet(m_affineMatrix, 0, 1, m_ve2dbMatrix[0][1]);
- cvmSet(m_affineMatrix, 0, 2, m_ve2dbMatrix[0][2]);
- cvmSet(m_affineMatrix, 1, 0, m_ve2dbMatrix[1][0]);
- cvmSet(m_affineMatrix, 1, 1, m_ve2dbMatrix[1][1]);
- cvmSet(m_affineMatrix, 1, 2, m_ve2dbMatrix[1][2]);
+ m_affineMatrix(0, 0) = m_ve2dbMatrix[0][0];
+ m_affineMatrix(0, 1) = m_ve2dbMatrix[0][1];
+ m_affineMatrix(0, 2) = m_ve2dbMatrix[0][2];
+ m_affineMatrix(1, 0) = m_ve2dbMatrix[1][0];
+ m_affineMatrix(1, 1) = m_ve2dbMatrix[1][1];
+ m_affineMatrix(1, 2) = m_ve2dbMatrix[1][2];
+
+
}else
{
cout<<"Incorrect configuration information."<<endl;
return RTC::RTC_ERROR;
}
+
+
+
/* 変換行列を反映させる */
- cvWarpAffine( m_image_buff, m_image_dest, m_affineMatrix, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
+ warpAffine(m_image_buff, m_image_dest, m_affineMatrix, m_image_dest.size());
/* 画像データのサイズ取得 */
- int len = m_image_dest->nChannels * m_image_dest->width * m_image_dest->height;
+ int len = m_image_dest.channels() * m_image_dest.size().width * m_image_dest.size().height;
/* 画面のサイズ情報を入れる */
- m_image_affine.pixels.length(len);
- m_image_affine.width = m_image_dest->width;
- m_image_affine.height = m_image_dest->height;
+ m_image_affine.pixels.length(len);
+ m_image_affine.width = m_image_dest.size().width;
+ m_image_affine.height = m_image_dest.size().height;
+
/* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_affine.pixels[0]), m_image_dest->imageData,len);
+ memcpy((void *)&(m_image_affine.pixels[0]), m_image_dest.data,len);
/* 反転した画像データをOutPortから出力する */
m_image_affineOut.write();
Modified: trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/include/BackGroundSubtractionSimple/BackGroundSubtractionSimple.h
===================================================================
--- trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/include/BackGroundSubtractionSimple/BackGroundSubtractionSimple.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/include/BackGroundSubtractionSimple/BackGroundSubtractionSimple.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -19,9 +19,7 @@
#include <rtm/idl/ExtendedDataTypesSkel.h>
#include <rtm/idl/InterfaceDataTypesSkel.h>
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
#define THRESHOLD_MAX_VALUE 255 /* 2値化の際に使用する最大値 */
#define SCALE ( 1.0 / 255.0 ) /* L*a*b*に変換するために必要なスケールファクタ */
@@ -323,11 +321,11 @@
void labDifference( void );
void grayScaleDifference( void );
- IplImage *m_originalImage;
- IplImage *m_currentImage;
- IplImage *m_backgroundImage;
- IplImage *m_resultImage;
- IplImage *m_outputImage;
+ cv::Mat m_originalImage;
+ cv::Mat m_currentImage;
+ cv::Mat m_backgroundImage;
+ cv::Mat m_resultImage;
+ cv::Mat m_outputImage;
int m_differenceMode; /* 差分の計算モード */
int m_noiseMode; /* ノイズを除去するモード */
Modified: trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/src/BackGroundSubtractionSimple.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/src/BackGroundSubtractionSimple.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/BackGroundSubtractionSimple/src/BackGroundSubtractionSimple.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -155,28 +155,31 @@
RTC::ReturnCode_t BackGroundSubtractionSimple::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_originalImage != NULL){
- cvReleaseImage(&m_originalImage);
+
+
+ if (!m_originalImage.empty())
+ {
+ m_originalImage.release();
}
- if(m_currentImage != NULL){
- cvReleaseImage(&m_currentImage);
+ if (!m_currentImage.empty())
+ {
+ m_currentImage.release();
}
- if(m_resultImage != NULL){
- cvReleaseImage(&m_resultImage);
+ if (!m_resultImage.empty())
+ {
+ m_resultImage.release();
}
- if(m_outputImage != NULL){
- cvReleaseImage(&m_outputImage);
+ if (!m_outputImage.empty())
+ {
+ m_outputImage.release();
}
- if(m_backgroundImage != NULL){
- cvReleaseImage(&m_backgroundImage);
+ if (!m_backgroundImage.empty())
+ {
+ m_backgroundImage.release();
}
- m_originalImage = NULL;
- m_currentImage = NULL;
- m_backgroundImage = NULL;
- m_resultImage = NULL;
- m_outputImage = NULL;
+
return RTC::RTC_OK;
}
@@ -189,31 +192,37 @@
/* イメージRead */
m_img_origIn.read();
- if(m_originalImage == NULL){
- m_originalImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
+ if (m_originalImage.empty())
+ {
+ m_originalImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
}
- if(m_currentImage == NULL){
- m_currentImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
+ if (m_currentImage.empty())
+ {
+ m_currentImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
}
if(m_img_orig.width != m_temp_w || m_img_orig.height != m_temp_h){
+ if (m_backgroundImage.empty())
+ {
+ m_backgroundImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+ }
+ }
- if(m_backgroundImage != NULL){
- cvReleaseImage(&m_backgroundImage);
- }
- m_backgroundImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
+
+
+ if (m_resultImage.empty())
+ {
+ m_resultImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
}
- if(m_resultImage == NULL){
- m_resultImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 1);
+ if (m_outputImage.empty())
+ {
+ m_outputImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
}
- if(m_outputImage == NULL){
- m_outputImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
- }
/* InPortの映像の取得 */
- memcpy(m_originalImage->imageData,(void *)&(m_img_orig.pixels[0]),m_img_orig.pixels.length());
- m_currentImage = cvCloneImage( m_originalImage );
+ memcpy(m_originalImage.data,(void *)&(m_img_orig.pixels[0]),m_img_orig.pixels.length());
+ m_currentImage = m_originalImage.clone();
/* 差の計算方法の切り替え */
if( m_differenceMode == COLOR_DIFFERENCE ){
@@ -229,36 +238,41 @@
/* ノイズ除去 */
if( m_noiseMode == NOISE_MORPHOLOGY ){
- cvErode( m_resultImage, m_resultImage );
- cvDilate( m_resultImage, m_resultImage );
+ //cv::Mat tmp();
+ erode(m_currentImage, m_backgroundImage, cv::Mat(), cv::Point(-1, -1), 1);
+ dilate(m_backgroundImage, m_resultImage, cv::Mat(), cv::Point(-1, -1), 1);
}else if ( m_noiseMode == NOISE_MEDIAN ){
- cvSmooth( m_resultImage, m_resultImage, CV_MEDIAN );
+ GaussianBlur(m_currentImage, m_resultImage, m_currentImage.size(), 0,0);
}
- cvMerge( m_resultImage, m_resultImage, m_resultImage, NULL, m_outputImage );
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(m_resultImage);
+ tmp.push_back(m_resultImage);
+ tmp.push_back(m_resultImage);
+ merge(tmp, m_outputImage);
/* 画像データのサイズ取得 */
- int len1 = (m_currentImage->nChannels * m_currentImage->width * m_currentImage->height);
- int len2 = (m_outputImage->nChannels * m_outputImage->width * m_outputImage->height);
- int len3 = (m_backgroundImage->nChannels * m_backgroundImage->width * m_backgroundImage->height);
+ int len1 = (m_currentImage.channels() * m_currentImage.size().width * m_currentImage.size().height);
+ int len2 = (m_outputImage.channels() * m_outputImage.size().width * m_outputImage.size().height);
+ int len3 = (m_backgroundImage.channels() * m_backgroundImage.size().width * m_backgroundImage.size().height);
m_img_curr.pixels.length(len1);
m_img_resu.pixels.length(len2);
m_img_back.pixels.length(len3);
/* 該当のイメージをMemCopyする */
- memcpy((void *)&(m_img_curr.pixels[0]), m_currentImage->imageData, len1);
- memcpy((void *)&(m_img_resu.pixels[0]), m_outputImage->imageData, len2);
- memcpy((void *)&(m_img_back.pixels[0]), m_backgroundImage->imageData, len3);
+ memcpy((void *)&(m_img_curr.pixels[0]), m_currentImage.data, len1);
+ memcpy((void *)&(m_img_resu.pixels[0]), m_outputImage.data, len2);
+ memcpy((void *)&(m_img_back.pixels[0]), m_backgroundImage.data, len3);
- m_img_curr.width = m_originalImage->width;
- m_img_curr.height = m_originalImage->height;
+ m_img_curr.width = m_originalImage.size().width;
+ m_img_curr.height = m_originalImage.size().height;
- m_img_resu.width = m_originalImage->width;
- m_img_resu.height = m_originalImage->height;
+ m_img_resu.width = m_originalImage.size().width;
+ m_img_resu.height = m_originalImage.size().height;
- m_img_back.width = m_originalImage->width;
- m_img_back.height = m_originalImage->height;
+ m_img_back.width = m_originalImage.size().width;
+ m_img_back.height = m_originalImage.size().height;
m_img_currOut.write();
m_img_resuOut.write();
@@ -271,10 +285,8 @@
if(m_cont_mode == 'b')
{
/* 背景画像更新 */
- if(m_backgroundImage != NULL) {
- cvReleaseImage(&m_backgroundImage);
- }
- m_backgroundImage = cvCloneImage(m_originalImage);
+
+ m_backgroundImage = m_originalImage.clone();
printf( "Background image update( %s : %s )\n",
differenceMethod[m_differenceMode].c_str(), noiseMethod[m_noiseMode].c_str() );
@@ -288,18 +300,7 @@
}
}
- if(m_originalImage != NULL){
- cvReleaseImage(&m_originalImage);
- }
- if(m_currentImage != NULL){
- cvReleaseImage(&m_currentImage);
- }
- if(m_resultImage != NULL){
- cvReleaseImage(&m_resultImage);
- }
- if(m_outputImage != NULL){
- cvReleaseImage(&m_outputImage);
- }
+
//if(backgroundImage != NULL){
// cvReleaseImage(&backgroundImage);
//}
@@ -354,30 +355,35 @@
{
/* 画像を生成する */
- IplImage *differenceImage = cvCreateImage(cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_8U, 3); // 差分画像用IplImage
- IplImage *differenceRImage = cvCreateImage(cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_8U, 1); // R値の差分用IplImage
- IplImage *differenceGImage = cvCreateImage(cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_8U, 1); // G値の差分用IplImage
- IplImage *differenceBImage = cvCreateImage(cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_8U, 1); // B値の差分用IplImage
+ cv::Mat differenceImage;
+ cv::Mat differenceRImage;
+ cv::Mat differenceGImage;
+ cv::Mat differenceBImage;
+ differenceImage.create(m_currentImage.size(), CV_8UC3);
+ differenceRImage.create(m_currentImage.size(), CV_8UC1);
+ differenceGImage.create(m_currentImage.size(), CV_8UC1);
+ differenceBImage.create(m_currentImage.size(), CV_8UC1);
+
/* 現在の背景との差の絶対値を成分ごとに取る */
- cvAbsDiff( m_currentImage, m_backgroundImage, differenceImage );
+ absdiff( m_currentImage, m_backgroundImage, differenceImage );
/* 閾値処理を行う */
- cvThreshold( differenceImage, differenceImage, m_nThresholdLv, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ threshold( differenceImage, differenceImage, m_nThresholdLv, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY );
/* 成分ごとの画像に分割する */
- cvSplit( differenceImage, differenceBImage, differenceGImage, differenceRImage, NULL );
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(differenceBImage);
+ tmp.push_back(differenceGImage);
+ tmp.push_back(differenceRImage);
+ split(differenceImage, tmp);
/* ORで合成する */
- cvOr( differenceRImage, differenceGImage, m_resultImage );
- cvOr( differenceBImage, m_resultImage, m_resultImage );
+ cv::bitwise_or(differenceRImage, differenceGImage, m_resultImage);
+ cv::bitwise_or(differenceBImage, m_resultImage, m_resultImage);
- /* メモリを解放する */
- cvReleaseImage( &differenceImage );
- cvReleaseImage( &differenceRImage );
- cvReleaseImage( &differenceGImage );
- cvReleaseImage( &differenceBImage );
+
}
//
@@ -386,42 +392,49 @@
void BackGroundSubtractionSimple::labDifference( void )
{
/* 画像を生成する */
- IplImage *currentLabImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height),IPL_DEPTH_32F, 3 ); /* 現在の画像をL*a*b*に変換した画像用IplImage */
- IplImage *backgroundLabImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_32F, 3 ); /* 背景をL*a*b*に変換した画像用IplImage */
- IplImage *differenceLabImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_32F, 3 ); /* 差分画像用IplImage */
- IplImage *differenceLImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_32F, 1 ); /* L*値の差分用IplImage */
- IplImage *differenceAImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_32F, 1 ); /* a*値の差分用IplImage */
- IplImage *differenceBImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_32F, 1 ); /* b*値の差分用IplImage */
- IplImage *sqrDifferenceImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_32F, 1 ); /* 距離算出用IplImage */
+ cv::Mat currentLabImage;
+ cv::Mat backgroundLabImage;
+ cv::Mat differenceLabImage;
+ cv::Mat differenceLImage;
+ cv::Mat differenceAImage;
+ cv::Mat differenceBImage;
+ cv::Mat sqrDifferenceImage;
+
+ currentLabImage.create(m_currentImage.size(), CV_8UC3); /* 現在の画像をL*a*b*に変換した画像用IplImage */
+ backgroundLabImage.create(m_currentImage.size(), CV_8UC3);/* 背景をL*a*b*に変換した画像用IplImage */
+ differenceLabImage.create(m_currentImage.size(), CV_8UC3);/* 差分画像用IplImage */
+ differenceLImage.create(m_currentImage.size(), CV_8UC1); /* L*値の差分用IplImage */
+ differenceAImage.create(m_currentImage.size(), CV_8UC1); /* a*値の差分用IplImage */
+ differenceBImage.create(m_currentImage.size(), CV_8UC1); /* b*値の差分用IplImage */
+ sqrDifferenceImage.create(m_currentImage.size(), CV_8UC1);/* 距離算出用IplImage */
+
/* 現在の画像と背景を共に CIE L*a*b* に変換 */
- cvConvertScale( m_currentImage, currentLabImage, SCALE );
- cvConvertScale( m_backgroundImage, backgroundLabImage, SCALE );
- cvCvtColor( currentLabImage, currentLabImage, CV_BGR2Lab );
- cvCvtColor( backgroundLabImage, backgroundLabImage, CV_BGR2Lab );
+ currentLabImage.convertTo(m_currentImage, CV_32F, SCALE);
+ backgroundLabImage.convertTo(m_backgroundImage, CV_32F, SCALE);
+ cv::cvtColor(currentLabImage, currentLabImage, CV_BGR2Lab);
+ cv::cvtColor(backgroundLabImage, backgroundLabImage, CV_BGR2Lab);
+
/* 距離の二乗を計算する */
- cvSub( currentLabImage, backgroundLabImage, differenceLabImage );
- cvPow( differenceLabImage, differenceLabImage, 2 );
+ subtract(currentLabImage, backgroundLabImage, differenceLabImage);
+ cv::pow(differenceLabImage,2, differenceLabImage);
/* 成分ごとの画像に分割する */
- cvSplit( differenceLabImage, differenceLImage, differenceAImage, differenceBImage, NULL );
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(differenceLImage);
+ tmp.push_back(differenceAImage);
+ tmp.push_back(differenceBImage);
+ cv::split(differenceLabImage, tmp);
- cvCopy( differenceLImage, sqrDifferenceImage );
- cvAdd( differenceAImage, sqrDifferenceImage, sqrDifferenceImage );
- cvAdd( differenceBImage, sqrDifferenceImage, sqrDifferenceImage );
+ sqrDifferenceImage = differenceLImage;
+ //sqrDifferenceImage.copyTo(differenceLImage);
+ cv::add( differenceAImage, sqrDifferenceImage, sqrDifferenceImage );
+ cv::add(differenceBImage, sqrDifferenceImage, sqrDifferenceImage);
/* 閾値処理を行う */
- cvThreshold( sqrDifferenceImage, m_resultImage, m_nThresholdLv * m_nThresholdLv, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ threshold(sqrDifferenceImage, m_resultImage, m_nThresholdLv, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY);
- /* メモリを解放する */
- cvReleaseImage( &currentLabImage );
- cvReleaseImage( &backgroundLabImage );
- cvReleaseImage( &differenceLabImage );
- cvReleaseImage( &differenceLImage );
- cvReleaseImage( &differenceAImage );
- cvReleaseImage( &differenceBImage );
- cvReleaseImage( &sqrDifferenceImage );
}
//
@@ -430,19 +443,22 @@
void BackGroundSubtractionSimple::grayScaleDifference( void )
{
/* 画像を生成する */
- IplImage *differenceImage = cvCreateImage( cvSize(m_currentImage->width, m_currentImage->height), IPL_DEPTH_8U, 3 ); /* 差分画像用IplImage */
+ cv::Mat differenceImage; /* 差分画像用IplImage */
+
+ differenceImage.create(m_currentImage.size(), CV_8UC3);
+
+
/* 現在の背景との差の絶対値を成分ごとに取る */
- cvAbsDiff( m_currentImage, m_backgroundImage, differenceImage );
+ absdiff( m_currentImage, m_backgroundImage, differenceImage );
/* BGRからグレースケールに変換する */
- cvCvtColor( differenceImage, m_resultImage, CV_BGR2GRAY );
+ cvtColor(differenceImage, m_resultImage, cv::COLOR_BGR2GRAY);
/* グレースケールから2値に変換する */
- cvThreshold( m_resultImage, m_resultImage, m_nThresholdLv, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ threshold( m_resultImage, m_resultImage, m_nThresholdLv, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY );
- /* メモリを解放する */
- cvReleaseImage( &differenceImage );
+
}
extern "C"
Modified: trunk/ImageProcessing/opencv/components/Binarization/include/Binarization/Binarization.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Binarization/include/Binarization/Binarization.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Binarization/include/Binarization/Binarization.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用インクルードファイルのインクルード */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -282,12 +280,12 @@
int m_in_height; // 入力イメージのHeight
int m_in_width; // 入力イメージのWidth
- IplImage* m_image_buff; // Original Image
+ cv::Mat m_image_buff; // Original Image
- IplImage* m_image_gray; // Grayscale image
- IplImage* m_image_binary; // Binary image
+ cv::Mat m_image_gray; // Grayscale image
+ cv::Mat m_image_binary; // Binary image
- IplImage* m_image_dest; // 結果出力用IplImage
+ cv::Mat m_image_dest; // 結果出力用IplImage
};
Modified: trunk/ImageProcessing/opencv/components/Binarization/src/Binarization.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Binarization/src/Binarization.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Binarization/src/Binarization.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -109,12 +109,8 @@
RTC::ReturnCode_t Binarization::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの確保 */
- m_image_buff = NULL;
- m_image_binary = NULL;
- m_image_gray = NULL;
- m_image_dest = NULL;
+
m_in_height = 0;
m_in_width = 0;
@@ -124,14 +120,23 @@
RTC::ReturnCode_t Binarization::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_binary != NULL)
- cvReleaseImage(&m_image_binary);
- if(m_image_gray != NULL)
- cvReleaseImage(&m_image_gray);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
+ }
+ if (!m_image_binary.empty())
+ {
+ m_image_binary.release();
+ }
+ if (!m_image_gray.empty())
+ {
+ m_image_gray.release();
+ }
+ if (!m_image_dest.empty())
+ {
+ m_image_dest.release();
+ }
+
return RTC::RTC_OK;
}
@@ -154,46 +159,44 @@
m_in_height = m_image_orig.height;
m_in_width = m_image_orig.width;
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_binary != NULL)
- cvReleaseImage(&m_image_binary);
- if(m_image_gray != NULL)
- cvReleaseImage(&m_image_gray);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
+ /* サイズ変換のためTempメモリーをよいする */
+ m_image_buff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_binary.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ m_image_gray.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ m_image_dest.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+
+
- /* サイズ変換のためTempメモリーをよいする */
- m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_image_binary = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
- m_image_gray = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
- m_image_dest = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
}
/* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
+ memcpy(m_image_buff.data,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
/* Anternative process */
/* BGRからグレースケールに変換する */
- cvCvtColor( m_image_buff, m_image_gray, CV_BGR2GRAY );
+ cv::cvtColor(m_image_buff, m_image_gray, cv::COLOR_BGR2GRAY);
/* グレースケールから2値に変換する */
- cvThreshold( m_image_gray, m_image_binary, m_nThresholdLv, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ cv::threshold(m_image_gray, m_image_binary, m_nThresholdLv, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY);
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(m_image_binary);
+ tmp.push_back(m_image_binary);
+ tmp.push_back(m_image_binary);
/* Convert to 3channel image */
- cvMerge(m_image_binary, m_image_binary, m_image_binary, NULL, m_image_dest);
+ cv::merge(tmp, m_image_dest);
/* Common process */
/* 画像データのサイズ取得 */
- int len = m_image_dest->nChannels * m_image_dest->width * m_image_dest->height;
+ int len = m_image_dest.channels() * m_image_dest.size().width * m_image_dest.size().height;
/* 画面のサイズ情報を入れる */
m_image_output.pixels.length(len);
- m_image_output.width = m_image_dest->width;
- m_image_output.height = m_image_dest->height;
+ m_image_output.width = m_image_dest.size().width;
+ m_image_output.height = m_image_dest.size().height;
/* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_output.pixels[0]), m_image_dest->imageData,len);
+ memcpy((void *)&(m_image_output.pixels[0]), m_image_dest.data,len);
/* 反転した画像データをOutPortから出力する */
m_image_outputOut.write();
Modified: trunk/ImageProcessing/opencv/components/CameraViewer/include/CameraViewer/CameraViewer.h
===================================================================
--- trunk/ImageProcessing/opencv/components/CameraViewer/include/CameraViewer/CameraViewer.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/CameraViewer/include/CameraViewer/CameraViewer.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -19,9 +19,8 @@
#include <rtm/idl/ExtendedDataTypesSkel.h>
#include <rtm/idl/InterfaceDataTypesSkel.h>
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -302,7 +301,7 @@
private:
int dummy;
- IplImage* m_orig_img;
+ cv::Mat m_orig_img;
int m_nOldHeight; /* CFG更新チェック用 */
int m_nOldWidth;
Modified: trunk/ImageProcessing/opencv/components/CameraViewer/src/CameraViewer.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/CameraViewer/src/CameraViewer.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/CameraViewer/src/CameraViewer.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -120,14 +120,14 @@
RTC::ReturnCode_t CameraViewer::onActivated(RTC::UniqueId ec_id)
{
- m_orig_img = NULL;
+
m_in_height = 0;
m_in_width = 0;
/* 画像表示用ウィンドウの作成 */
- cvNamedWindow("CaptureImage", CV_WINDOW_AUTOSIZE);
- cvSetMouseCallback("CaptureImage", onMouse, (void*)this);
+ cv::namedWindow("CaptureImage", CV_WINDOW_AUTOSIZE);
+ cv::setMouseCallback("CaptureImage", onMouse, (void*)this);
return RTC::RTC_OK;
}
@@ -135,11 +135,14 @@
RTC::ReturnCode_t CameraViewer::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_orig_img != NULL)
- cvReleaseImage(&m_orig_img);
- /* 表示ウィンドウの消去 */
- cvDestroyWindow("CaptureImage");
+ if (!m_orig_img.empty())
+ {
+ m_orig_img.release();
+ }
+ cv::destroyWindow("CaptureImage");
+
+
return RTC::RTC_OK;
}
@@ -151,7 +154,7 @@
int nLength;
- m_lKey.data = cvWaitKey(1);
+ m_lKey.data = cv::waitKey(1);
if(m_lKey.data >= 0)
{
printf("[onExecute] Key number %ld is down\n", m_lKey.data);
@@ -177,25 +180,24 @@
{
printf("[onExecute] Size of input image is not match!\n");
- if(m_orig_img != NULL)
- cvReleaseImage(&m_orig_img);
+
m_in_height = m_in.height;
m_in_width = m_in.width;
/* サイズ変換のためTempメモリーを用意する */
- m_orig_img = cvCreateImage(cvSize(m_in.width, m_in.height), IPL_DEPTH_8U, 3);
+ m_orig_img.create(cv::Size(m_in.width, m_in.height), CV_8UC3);
}
/* データコピー */
- memcpy(m_orig_img->imageData,(void *)&(m_in.pixels[0]), m_in.pixels.length());
+ memcpy(m_orig_img.data,(void *)&(m_in.pixels[0]), m_in.pixels.length());
/* 画像表示 */
#if (!defined WIN32) || (!defined WIN64)
- cvStartWindowThread();
+ cv::startWindowThread();
#endif
- cvShowImage("CaptureImage", m_orig_img);
+ cv::imshow("CaptureImage", m_orig_img);
if (count > 100)
{
Modified: trunk/ImageProcessing/opencv/components/Chromakey/include/Chromakey/Chromakey.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Chromakey/include/Chromakey/Chromakey.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Chromakey/include/Chromakey/Chromakey.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用 */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -320,18 +318,18 @@
int m_in2_height; // Background入力イメージのHeight
int m_in2_width; // Background入力イメージのWidth
- IplImage* m_image_buff; // Original Image
+ cv::Mat m_image_buff; // Original Image
- IplImage* m_image_extracted; // Extracted Image
+ cv::Mat m_image_extracted; // Extracted Image
- IplImage* m_image_mask; // Mask Image
- IplImage* m_image_inverseMask; // Inverse Mask Image
+ cv::Mat m_image_mask; // Mask Image
+ cv::Mat m_image_inverseMask; // Inverse Mask Image
- IplImage* m_image_BG_in; // Background Input image
- IplImage* m_image_BG; // Background Converted Image(Resized to Camera Image)
- IplImage* m_image_extractedBG; // Extracted Background Image
+ cv::Mat m_image_BG_in; // Background Input image
+ cv::Mat m_image_BG; // Background Converted Image(Resized to Camera Image)
+ cv::Mat m_image_extractedBG; // Extracted Background Image
- IplImage* m_image_destination; // 結果出力用IplImage
+ cv::Mat m_image_destination; // 結果出力用IplImage
};
Modified: trunk/ImageProcessing/opencv/components/Chromakey/src/Chromakey.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Chromakey/src/Chromakey.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Chromakey/src/Chromakey.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -129,19 +129,7 @@
RTC::ReturnCode_t Chromakey::onActivated(RTC::UniqueId ec_id)
{
- m_image_buff = NULL;
- m_image_extracted = NULL;
-
- m_image_mask = NULL;
- m_image_inverseMask = NULL;
-
- m_image_BG_in = NULL;
- m_image_BG = NULL;
- m_image_extractedBG = NULL;
-
- m_image_destination = NULL;
-
m_in_height = 0;
m_in_width = 0;
m_in2_height = 0;
@@ -154,23 +142,40 @@
RTC::ReturnCode_t Chromakey::onDeactivated(RTC::UniqueId ec_id)
{
/* イメージ用メモリの解放 */
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_extracted != NULL)
- cvReleaseImage(&m_image_extracted);
- if(m_image_mask != NULL)
- cvReleaseImage(&m_image_mask);
- if(m_image_inverseMask != NULL)
- cvReleaseImage(&m_image_inverseMask);
- if(m_image_BG_in != NULL)
- cvReleaseImage(&m_image_BG_in);
- if(m_image_BG != NULL)
- cvReleaseImage(&m_image_BG);
- if(m_image_extractedBG != NULL)
- cvReleaseImage(&m_image_extractedBG);
- if(m_image_destination != NULL)
- cvReleaseImage(&m_image_destination);
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
+ }
+ if (!m_image_extracted.empty())
+ {
+ m_image_extracted.release();
+ }
+ if (!m_image_mask.empty())
+ {
+ m_image_mask.release();
+ }
+ if (!m_image_inverseMask.empty())
+ {
+ m_image_inverseMask.release();
+ }
+ if (!m_image_BG_in.empty())
+ {
+ m_image_BG_in.release();
+ }
+ if (!m_image_BG.empty())
+ {
+ m_image_BG.release();
+ }
+ if (!m_image_extractedBG.empty())
+ {
+ m_image_extractedBG.release();
+ }
+ if (!m_image_destination.empty())
+ {
+ m_image_destination.release();
+ }
+
return RTC::RTC_OK;
}
@@ -192,15 +197,15 @@
m_in2_height = m_image_back.height;
m_in2_width = m_image_back.width;
- if(m_image_BG_in != NULL)
- cvReleaseImage(&m_image_BG_in);
+
/* サイズ変換のためTempメモリーを用意する */
- m_image_BG_in = cvCreateImage(cvSize(m_in2_width, m_in2_height), IPL_DEPTH_8U, 3);
+ m_image_BG_in.create(cv::Size(m_in2_width, m_in2_height), CV_8UC3);
+
}
/* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_BG_in->imageData,(void *)&(m_image_back.pixels[0]), m_image_back.pixels.length());
+ memcpy(m_image_BG_in.data,(void *)&(m_image_back.pixels[0]), m_image_back.pixels.length());
}
/* 新しいデータのチェック */
@@ -216,64 +221,57 @@
m_in_height = m_image_original.height;
m_in_width = m_image_original.width;
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_extracted != NULL)
- cvReleaseImage(&m_image_extracted);
- if(m_image_mask != NULL)
- cvReleaseImage(&m_image_mask);
- if(m_image_inverseMask != NULL)
- cvReleaseImage(&m_image_inverseMask);
- if(m_image_BG != NULL)
- cvReleaseImage(&m_image_BG);
- if(m_image_extractedBG != NULL)
- cvReleaseImage(&m_image_extractedBG);
- if(m_image_destination != NULL)
- cvReleaseImage(&m_image_destination);
- m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
- m_image_extracted = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
- m_image_mask = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1 );
- m_image_inverseMask = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1 );
- m_image_BG = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
- m_image_extractedBG = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
- m_image_destination = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
+
+
+
+ m_image_buff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_extracted.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_mask.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ m_image_inverseMask.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ m_image_BG.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_extractedBG.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_destination.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
}
// Resize background image to fit Camera image
- if(m_image_BG_in != NULL)
- cvResize(m_image_BG_in, m_image_BG, CV_INTER_LINEAR);
+ if(!m_image_BG_in.empty())
+ cv::resize(m_image_BG_in, m_image_BG, m_image_BG.size());
- memcpy(m_image_buff->imageData,(void *)&(m_image_original.pixels[0]),m_image_original.pixels.length());
+ memcpy(m_image_buff.data,(void *)&(m_image_original.pixels[0]),m_image_original.pixels.length());
// Anternative actions
- CvScalar lowerValue = cvScalar(m_nLowerBlue, m_nLowerGreen, m_nLowerRed);
- CvScalar upperValue = cvScalar(m_nUpperBlue + 1, m_nUpperGreen + 1, m_nUpperRed + 1);
+ cv::Scalar lowerValue = cv::Scalar(m_nLowerBlue, m_nLowerGreen, m_nLowerRed);
+ cv::Scalar upperValue = cv::Scalar(m_nUpperBlue + 1, m_nUpperGreen + 1, m_nUpperRed + 1);
/* RGB各チャンネルごとに範囲内の値以外の画素をマスクに設定する */
- cvInRangeS( m_image_buff, lowerValue, upperValue, m_image_mask );
+ cv::inRange(m_image_buff, lowerValue, upperValue, m_image_mask);
/* 背景画像のうち合成する物体部分の画素値を0にする */
- cvSetZero( m_image_extractedBG );
- cvCopy( m_image_BG, m_image_extractedBG, m_image_mask );
+ m_image_extractedBG = cv::Mat::zeros(m_image_extractedBG.size(), CV_8UC3);
+ m_image_BG.copyTo(m_image_extractedBG, m_image_mask);
+
+
/* マスク画像の0と1を反転する */
- cvNot( m_image_mask, m_image_inverseMask );
+ cv::bitwise_not(m_image_mask, m_image_inverseMask);
+
- /* トラックバーの条件を満たす合成物体が抽出された画像を作成 */
- cvSetZero( m_image_extracted );
- cvCopy( m_image_buff, m_image_extracted, m_image_inverseMask );
+ /* トラックバーの条件を満たす合成物体が抽出された画像を作成 */
+ m_image_extracted = cv::Mat::zeros(m_image_extracted.size(), CV_8UC3);
+ m_image_buff.copyTo(m_image_extracted, m_image_inverseMask);
+
/* 背景画像と合成物体画像の合成 */
- cvAdd( m_image_extractedBG, m_image_extracted, m_image_destination, NULL);
+ cv::add( m_image_extractedBG, m_image_extracted, m_image_destination);
// Prepare to out data
- int len = m_image_destination->nChannels * m_image_destination->width * m_image_destination->height;
+ int len = m_image_destination.channels() * m_image_destination.size().width * m_image_destination.size().height;
m_image_output.pixels.length(len);
- m_image_output.width = m_image_destination->width;
- m_image_output.height = m_image_destination->height;
- memcpy((void *)&(m_image_output.pixels[0]), m_image_destination->imageData,len);
+ m_image_output.width = m_image_destination.size().width;
+ m_image_output.height = m_image_destination.size().height;
+ memcpy((void *)&(m_image_output.pixels[0]), m_image_destination.data,len);
m_image_outputOut.write();
}
Modified: trunk/ImageProcessing/opencv/components/Dilationerosion/include/DilationErosion/DilationErosion.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Dilationerosion/include/DilationErosion/DilationErosion.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Dilationerosion/include/DilationErosion/DilationErosion.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -22,9 +22,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCVHeadファイルのIncluding */
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -302,18 +300,18 @@
// </rtc-template>
int dummy;
- IplImage* m_image_buff;
- IplImage* m_output_image_buff;
- IplImage* m_gray_buff;
- IplImage* m_binary_buff;
- IplImage* m_dilation_buff;
- IplImage* m_erosion_buff;
- IplImage* m_merge_Image;
- IplImage* m_dilation_image;
- IplImage* m_erosion_image;
- IplImage* m_dila_merge_img;
- IplImage* m_ero_merge_img;
- IplImage* m_noise_merge_img;
+ cv::Mat m_image_buff;
+ cv::Mat m_output_image_buff;
+ cv::Mat m_gray_buff;
+ cv::Mat m_binary_buff;
+ cv::Mat m_dilation_buff;
+ cv::Mat m_erosion_buff;
+ cv::Mat m_merge_Image;
+ cv::Mat m_dilation_image;
+ cv::Mat m_erosion_image;
+ cv::Mat m_dila_merge_img;
+ cv::Mat m_ero_merge_img;
+ cv::Mat m_noise_merge_img;
};
Modified: trunk/ImageProcessing/opencv/components/Dilationerosion/src/DilationErosion.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Dilationerosion/src/DilationErosion.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Dilationerosion/src/DilationErosion.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -117,21 +117,8 @@
RTC::ReturnCode_t DilationErosion::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの確保 */
+
- m_image_buff = NULL;
- m_gray_buff = NULL;
- m_binary_buff = NULL;
- m_dilation_buff = NULL;
- m_erosion_buff = NULL;
- m_output_image_buff = NULL;
- m_merge_Image = NULL;
- m_dilation_image = NULL;
- m_erosion_image = NULL;
- m_dila_merge_img = NULL;
- m_ero_merge_img = NULL;
- m_noise_merge_img = NULL;
-
return RTC::RTC_OK;
}
@@ -139,42 +126,55 @@
RTC::ReturnCode_t DilationErosion::onDeactivated(RTC::UniqueId ec_id)
{
/* メモリ解放 */
- if(m_image_buff != NULL){
- cvReleaseImage(&m_image_buff);
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
}
- if(m_gray_buff != NULL){
- cvReleaseImage(&m_gray_buff);
+ if (!m_gray_buff.empty())
+ {
+ m_gray_buff.release();
}
- if(m_binary_buff != NULL){
- cvReleaseImage(&m_binary_buff);
+ if (!m_binary_buff.empty())
+ {
+ m_binary_buff.release();
}
- if(m_dilation_buff != NULL){
- cvReleaseImage(&m_dilation_buff);
+ if (!m_dilation_buff.empty())
+ {
+ m_dilation_buff.release();
}
- if(m_erosion_buff != NULL){
- cvReleaseImage(&m_erosion_buff);
+ if (!m_erosion_buff.empty())
+ {
+ m_erosion_buff.release();
}
- if(m_output_image_buff != NULL){
- cvReleaseImage(&m_output_image_buff);
+ if (!m_output_image_buff.empty())
+ {
+ m_output_image_buff.release();
}
- if(m_merge_Image != NULL){
- cvReleaseImage(&m_merge_Image);
+ if (!m_merge_Image.empty())
+ {
+ m_merge_Image.release();
}
- if(m_dilation_image != NULL){
- cvReleaseImage(&m_dilation_image);
+ if (!m_dilation_image.empty())
+ {
+ m_dilation_image.release();
}
- if(m_erosion_image != NULL){
- cvReleaseImage(&m_erosion_image);
+ if (!m_erosion_image.empty())
+ {
+ m_erosion_image.release();
}
- if(m_dila_merge_img != NULL){
- cvReleaseImage(&m_dila_merge_img);
+ if (!m_dila_merge_img.empty())
+ {
+ m_dila_merge_img.release();
}
- if(m_ero_merge_img != NULL){
- cvReleaseImage(&m_ero_merge_img);
+ if (!m_ero_merge_img.empty())
+ {
+ m_ero_merge_img.release();
}
- if(m_noise_merge_img != NULL){
- cvReleaseImage(&m_noise_merge_img);
+ if (!m_noise_merge_img.empty())
+ {
+ m_noise_merge_img.release();
}
+
return RTC::RTC_OK;
}
@@ -187,85 +187,89 @@
m_img_origIn.read();
- m_image_buff = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
- m_gray_buff = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 1);
- m_binary_buff = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 1);
- m_dilation_buff = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 1);
- m_erosion_buff = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 1);
- m_output_image_buff = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
- m_merge_Image = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
- m_dilation_image = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 1);
- m_erosion_image = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 1);
- m_dila_merge_img = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
- m_ero_merge_img = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
- m_noise_merge_img = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
+ m_image_buff.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+ m_gray_buff.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
+ m_binary_buff.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
+ m_dilation_buff.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
+ m_erosion_buff.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
+ m_output_image_buff.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+ m_merge_Image.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+ m_dilation_image.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
+ m_erosion_image.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
+ m_dila_merge_img.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+ m_ero_merge_img.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+ m_noise_merge_img.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+
+
+
/* InPortの映像データ */
- memcpy(m_image_buff->imageData,(void *)&(m_img_orig.pixels[0]), m_img_orig.pixels.length());
+ memcpy(m_image_buff.data,(void *)&(m_img_orig.pixels[0]), m_img_orig.pixels.length());
/* BGRからグレースケールに変換する */
- cvCvtColor( m_image_buff, m_gray_buff, CV_BGR2GRAY );
+ cvtColor(m_image_buff, m_gray_buff, cv::COLOR_BGR2GRAY);
/* グレースケールから2値に変換する */
- cvThreshold( m_gray_buff, m_binary_buff, m_nThreshold, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ threshold( m_gray_buff, m_binary_buff, m_nThreshold, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY );
/* Dilation/Erosionを行ってノイズを消す */
- cvDilate(m_binary_buff, m_dilation_buff, NULL, m_count_dilation);
- cvErode(m_dilation_buff, m_erosion_buff, NULL, m_count_erosion);
+ dilate(m_binary_buff, m_dilation_buff, cv::Mat(), cv::Point(-1, -1), m_count_dilation);
+ erode(m_dilation_buff, m_erosion_buff, cv::Mat(), cv::Point(-1, -1), m_count_erosion);
/* Dilationのみ行う */
- cvDilate(m_binary_buff, m_dilation_image, NULL, m_count_dilation);
+ dilate(m_binary_buff, m_dilation_image, cv::Mat(), cv::Point(-1, -1), m_count_dilation);
/* Erosionのみ行う */
- cvErode(m_binary_buff, m_erosion_image, NULL, m_count_erosion);
+ erode(m_binary_buff, m_erosion_image, cv::Mat(), cv::Point(-1, -1), m_count_erosion);
/* 画像データのサイズ取得 */
- int len = (m_output_image_buff->nChannels * m_output_image_buff->width * m_output_image_buff->height);
+ int len = (m_output_image_buff.channels() * m_output_image_buff.size().width * m_output_image_buff.size().height);
m_img_out.pixels.length(len);
m_img_dilation.pixels.length(len);
m_img_erosion.pixels.length(len);
/* DilationImageをマージする */
- cvMerge(m_dilation_image, m_dilation_image, m_dilation_image, NULL, m_dila_merge_img);
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(m_dilation_image);
+ tmp.push_back(m_dilation_image);
+ tmp.push_back(m_dilation_image);
+ cv::merge(tmp, m_dila_merge_img);
/* ErosionImageをマージする */
- cvMerge(m_erosion_image, m_erosion_image, m_erosion_image, NULL, m_ero_merge_img);
+ tmp.clear();
+ tmp.push_back(m_erosion_image);
+ tmp.push_back(m_erosion_image);
+ tmp.push_back(m_erosion_image);
+ cv::merge(tmp, m_ero_merge_img);
/* ノイズを消したImageをマージする */
- cvMerge(m_erosion_buff, m_erosion_buff, m_erosion_buff, NULL, m_noise_merge_img);
+ tmp.clear();
+ tmp.push_back(m_erosion_buff);
+ tmp.push_back(m_erosion_buff);
+ tmp.push_back(m_erosion_buff);
+ cv::merge(tmp, m_noise_merge_img);
/* 該当のイメージをMemCopyする */
- memcpy((void *)&(m_img_out.pixels[0]), m_noise_merge_img->imageData, len);
- memcpy((void *)&(m_img_dilation.pixels[0]), m_dila_merge_img->imageData, len);
- memcpy((void *)&(m_img_erosion.pixels[0]), m_ero_merge_img->imageData, len);
+ memcpy((void *)&(m_img_out.pixels[0]), m_noise_merge_img.data, len);
+ memcpy((void *)&(m_img_dilation.pixels[0]), m_dila_merge_img.data, len);
+ memcpy((void *)&(m_img_erosion.pixels[0]), m_ero_merge_img.data, len);
/* 反転した画像データをOutPortから出力する。 */
- m_img_out.width = m_image_buff->width;
- m_img_out.height = m_image_buff->height;
+ m_img_out.width = m_image_buff.size().width;
+ m_img_out.height = m_image_buff.size().height;
- m_img_dilation.width = m_image_buff->width;
- m_img_dilation.height = m_image_buff->height;
+ m_img_dilation.width = m_image_buff.size().width;
+ m_img_dilation.height = m_image_buff.size().height;
- m_img_erosion.width = m_image_buff->width;
- m_img_erosion.height = m_image_buff->height;
+ m_img_erosion.width = m_image_buff.size().width;
+ m_img_erosion.height = m_image_buff.size().height;
m_img_outOut.write();
m_img_dilationOut.write();
m_img_erosionOut.write();
- cvReleaseImage(&m_image_buff);
- cvReleaseImage(&m_gray_buff);
- cvReleaseImage(&m_binary_buff);
- cvReleaseImage(&m_dilation_buff);
- cvReleaseImage(&m_erosion_buff);
- cvReleaseImage(&m_output_image_buff);
- cvReleaseImage(&m_merge_Image);
- cvReleaseImage(&m_dilation_image);
- cvReleaseImage(&m_erosion_image);
- cvReleaseImage(&m_dila_merge_img);
- cvReleaseImage(&m_ero_merge_img);
- cvReleaseImage(&m_noise_merge_img);
+
}
return RTC::RTC_OK;
Modified: trunk/ImageProcessing/opencv/components/Edge/include/Edge/Edge.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Edge/include/Edge/Edge.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Edge/include/Edge/Edge.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
//OpenCV header file include
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// cvConvertScaleAbs用定数
#define SCALE 1 /* ScaleAbs係数 */
@@ -303,13 +301,13 @@
// <rtc-template block="private_operation">
// </rtc-template>
- IplImage* imageBuff;
- IplImage* grayImage;
- IplImage* destinationImage_x;
- IplImage* destinationImage_y;
- IplImage* destinationImage_LAPLACIAN;
- IplImage* destinationEdge;
- IplImage* edgeImage;
+ cv::Mat imageBuff;
+ cv::Mat grayImage;
+ cv::Mat destinationImage_x;
+ cv::Mat destinationImage_y;
+ cv::Mat destinationImage_LAPLACIAN;
+ cv::Mat destinationEdge;
+ cv::Mat edgeImage;
int len;
};
Modified: trunk/ImageProcessing/opencv/components/Edge/src/Edge.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Edge/src/Edge.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Edge/src/Edge.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -122,15 +122,8 @@
RTC::ReturnCode_t Edge::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの初期化 */
- imageBuff = NULL;
- grayImage = NULL;
- destinationImage_x = NULL;
- destinationImage_y = NULL;
- destinationImage_LAPLACIAN = NULL;
- destinationEdge = NULL;
- edgeImage = NULL;
+
/* OutPort画面サイズの初期化 */
m_image_edge_sobel_x.width = m_image_edge_sobel_y.width = m_image_edge_LAPLACIAN.width = 0;
m_image_edge_sobel_x.height = m_image_edge_sobel_y.height = m_image_edge_LAPLACIAN.height = 0;
@@ -143,17 +136,40 @@
RTC::ReturnCode_t Edge::onDeactivated(RTC::UniqueId ec_id)
{
- if(imageBuff != NULL)
- {
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&destinationImage_x);
- cvReleaseImage(&destinationImage_y);
- cvReleaseImage(&destinationImage_LAPLACIAN);
- cvReleaseImage(&destinationEdge);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&edgeImage);
- }
+ if (!imageBuff.empty())
+ {
+ imageBuff.release();
+ }
+ if (!destinationImage_x.empty())
+ {
+ destinationImage_x.release();
+ }
+ if (!destinationImage_y.empty())
+ {
+ destinationImage_y.release();
+ }
+ if (!destinationImage_LAPLACIAN.empty())
+ {
+ destinationImage_LAPLACIAN.release();
+ }
+ if (!destinationEdge.empty())
+ {
+ destinationEdge.release();
+ }
+ if (!destinationEdge.empty())
+ {
+ destinationEdge.release();
+ }
+ if (!grayImage.empty())
+ {
+ grayImage.release();
+ }
+ if (!edgeImage.empty())
+ {
+ edgeImage.release();
+ }
+
return RTC::RTC_OK;
}
@@ -171,50 +187,43 @@
m_image_edge_sobel_x.width = m_image_edge_sobel_y.width = m_image_edge_LAPLACIAN.width = m_image_orig.width;
m_image_edge_sobel_x.height = m_image_edge_sobel_y.height = m_image_edge_LAPLACIAN.height = m_image_orig.height;
- /* InPortのイメージサイズが変更された場合 */
- if(imageBuff != NULL)
- {
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&destinationImage_x);
- cvReleaseImage(&destinationImage_y);
- cvReleaseImage(&destinationImage_LAPLACIAN);
- cvReleaseImage(&destinationEdge);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&edgeImage);
- }
+
/* イメージ用メモリの確保 */
- imageBuff = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3 );
- grayImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1 );
- destinationImage_x = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_16S, 1 );
- destinationImage_y = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_16S, 1 );
- destinationImage_LAPLACIAN = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_16S, 1 );
- destinationEdge = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1 );
- edgeImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3 );
+ imageBuff.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+ grayImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ destinationImage_x.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_16SC1);
+ destinationImage_y.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_16SC1);
+ destinationImage_LAPLACIAN.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_16SC1);
+ destinationEdge.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ edgeImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+
+
}
/* InPortの画面データをコピー */
- memcpy( imageBuff->imageData, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
+ memcpy( imageBuff.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
/* RGBからグレースケールに変換 */
- cvCvtColor( imageBuff, grayImage, CV_RGB2GRAY );
+ cv::cvtColor( imageBuff, grayImage, CV_RGB2GRAY );
/* Sobel_X */
/* X方向のSobelオペレータをかける */
- cvSobel( grayImage, destinationImage_x, 1, 0, m_sobel_x_size );
+ cv::Sobel(grayImage, destinationImage_x, destinationImage_x.type(), 1, 0, m_sobel_x_size);
/* 16ビットの符号ありデータを8ビットの符号なしデータに変換する */
- cvConvertScaleAbs( destinationImage_x, destinationEdge, SCALE, SHIFT );
+ cv::convertScaleAbs( destinationImage_x, destinationEdge, SCALE, SHIFT );
/* グレースケールからRGBに変換 */
- cvCvtColor( destinationEdge, edgeImage, CV_GRAY2RGB );
+ cv::cvtColor( destinationEdge, edgeImage, CV_GRAY2RGB );
/* 画像データのサイズ取得 */
- len = edgeImage->nChannels * edgeImage->width * edgeImage->height;
+ len = edgeImage.channels() * edgeImage.size().width * edgeImage.size().height;
+
m_image_edge_sobel_x.pixels.length(len);
/* 反転した画像データをOutPortにコピー */
- memcpy( (void *)&(m_image_edge_sobel_x.pixels[0]), edgeImage->imageData, len );
+ memcpy( (void *)&(m_image_edge_sobel_x.pixels[0]), edgeImage.data, len );
/* 反転した画像データをOutPortから出力 */
m_image_edge_sobel_xOut.write();
@@ -222,29 +231,30 @@
/* Sobel_Y */
/* Y方向のSobelオペレータをかける */
- cvSobel( grayImage, destinationImage_y, 0, 1, m_sobel_y_size );
+ cv::Sobel(grayImage, destinationImage_y, destinationImage_y.type(), 0, 1, m_sobel_y_size);
- cvConvertScaleAbs( destinationImage_y, destinationEdge, SCALE, SHIFT );
+ cv::convertScaleAbs( destinationImage_y, destinationEdge, SCALE, SHIFT );
- cvCvtColor( destinationEdge, edgeImage, CV_GRAY2RGB );
+ cv::cvtColor( destinationEdge, edgeImage, CV_GRAY2RGB );
- len = edgeImage->nChannels * edgeImage->width * edgeImage->height;
+ len = edgeImage.channels() * edgeImage.size().width * edgeImage.size().height;
m_image_edge_sobel_y.pixels.length(len);
- memcpy( (void *)&(m_image_edge_sobel_y.pixels[0]), edgeImage->imageData, len );
+ memcpy( (void *)&(m_image_edge_sobel_y.pixels[0]), edgeImage.data, len );
m_image_edge_sobel_yOut.write();
// LAPLACIAN
- cvLaplace( grayImage, destinationImage_LAPLACIAN, m_laplacian_size );
+ cv::Laplacian(grayImage, destinationImage_LAPLACIAN, CV_16S, m_laplacian_size);
- cvConvertScaleAbs( destinationImage_LAPLACIAN, destinationEdge, SCALE, SHIFT );
+ cv::convertScaleAbs( destinationImage_LAPLACIAN, destinationEdge, SCALE, SHIFT );
- cvCvtColor( destinationEdge, edgeImage, CV_GRAY2RGB );
+ cv::cvtColor( destinationEdge, edgeImage, CV_GRAY2RGB );
- len = edgeImage->nChannels * edgeImage->width * edgeImage->height;
+ len = edgeImage.channels() * edgeImage.size().width * edgeImage.size().height;
+
m_image_edge_LAPLACIAN.pixels.length(len);
- memcpy( (void *)&(m_image_edge_LAPLACIAN.pixels[0]), edgeImage->imageData, len );
+ memcpy( (void *)&(m_image_edge_LAPLACIAN.pixels[0]), edgeImage.data, len );
m_image_edge_LAPLACIANOut.write();
Modified: trunk/ImageProcessing/opencv/components/Findcontour/include/Findcontour/Findcontour.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Findcontour/include/Findcontour/Findcontour.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Findcontour/include/Findcontour/Findcontour.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
//OpenCV header file include
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
#define THRESHOLD_MAX_VALUE 255 /* 2値化の際に使用する最大値 */
@@ -299,14 +297,14 @@
// <rtc-template block="private_operation">
// </rtc-template>
- IplImage* imageBuff;
- IplImage* grayImage;
- IplImage* binaryImage;
- IplImage* contourImage;
+ cv::Mat imageBuff;
+ cv::Mat grayImage;
+ cv::Mat binaryImage;
+ cv::Mat contourImage;
int find_contour_num;
- CvSeq* find_contour;
- CvScalar red;
- CvScalar green;
+ std::vector<std::vector<cv::Point>> find_contour;
+ cv::Scalar red;
+ cv::Scalar green;
};
Modified: trunk/ImageProcessing/opencv/components/Findcontour/src/Findcontour.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Findcontour/src/Findcontour.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Findcontour/src/Findcontour.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -118,19 +118,15 @@
RTC::ReturnCode_t Findcontour::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの初期化 */
- imageBuff = NULL;
- grayImage = NULL;
- binaryImage = NULL;
- contourImage = NULL;
+
/* OutPort画面サイズの初期化 */
m_image_contour.width = 0;
m_image_contour.height = 0;
- find_contour = NULL;
- red = CV_RGB( 255, 0, 0 );
- green = CV_RGB( 0, 255, 0 );
+ find_contour.clear();
+ red = cv::Scalar(0, 0, 255);
+ green = cv::Scalar(0, 255, 0);
return RTC::RTC_OK;
}
@@ -138,15 +134,26 @@
RTC::ReturnCode_t Findcontour::onDeactivated(RTC::UniqueId ec_id)
{
- if(imageBuff != NULL )
+
+
+ if (!imageBuff.empty())
{
- /* イメージ用メモリの解放 */
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&binaryImage);
- cvReleaseImage(&contourImage);
+ imageBuff.release();
}
+ if (!grayImage.empty())
+ {
+ grayImage.release();
+ }
+ if (!binaryImage.empty())
+ {
+ binaryImage.release();
+ }
+ if (!contourImage.empty())
+ {
+ contourImage.release();
+ }
+
return RTC::RTC_OK;
}
@@ -165,69 +172,78 @@
m_image_contour.width = m_image_orig.width;
m_image_contour.height = m_image_orig.height;
- /* InPortのイメージサイズが変更された場合 */
- if(imageBuff != NULL)
- {
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&binaryImage);
- cvReleaseImage(&contourImage);
- }
+
/* イメージ用メモリの確保 */
- imageBuff = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3 );
- grayImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1 );
- binaryImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1);
- contourImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3);
+
+ imageBuff.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+ grayImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ binaryImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ contourImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+
}
/* InPortの画面データをコピー */
- memcpy( imageBuff->imageData, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
- memcpy( contourImage->imageData, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
+ memcpy( imageBuff.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
+ memcpy( contourImage.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
/* RGBからグレースケールに変換 */
- cvCvtColor( imageBuff, grayImage, CV_RGB2GRAY);
+ cv::cvtColor( imageBuff, grayImage, CV_RGB2GRAY);
/* グレースケールから2値に変換する */
- cvThreshold( grayImage, binaryImage, m_nThresholdLv, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ cv::threshold( grayImage, binaryImage, m_nThresholdLv, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY );
- /* 抽出された輪郭を保存する領域 */
- CvMemStorage* storage = cvCreateMemStorage( 0 );
+ std::vector<cv::Vec4i> hierarchy;
/* 2値画像中の輪郭を見つけ、その数を返す */
- find_contour_num = cvFindContours(
+ cv::findContours(
binaryImage, /* 入力画像(8ビットシングルチャンネル) */
- storage, /* 抽出された輪郭を保存する領域 */
- &find_contour, /* 一番外側の輪郭へのポインタへのポインタ */
- sizeof( CvContour ), /* シーケンスヘッダのサイズ */
- CV_RETR_LIST, /* 抽出モード */
+ find_contour, /* 一番外側の輪郭へのポインタへのポインタ */
+ hierarchy,
+ CV_RETR_CCOMP, /* 抽出モード */
CV_CHAIN_APPROX_NONE, /* 推定手法 */
- cvPoint( 0, 0 ) /* オフセット */
+ cv::Point( 0, 0 ) /* オフセット */
);
- cvDrawContours(
- contourImage, /* 輪郭を描画する画像 */
- find_contour, /* 最初の輪郭へのポインタ */
- red, /* 外側輪郭線の色 */
- green, /* 内側輪郭線(穴)の色 */
- m_nContourLv, /* 描画される輪郭の最大レベル */
- m_nLineThickness, /* 描画される輪郭線の太さ */
- m_nLineType, /* 線の種類 */
- cvPoint( 0, 0 ) /* オフセット */
- );
+ find_contour_num = find_contour.size();
+
+ cv::drawContours(
+ contourImage, /* 輪郭を描画する画像 */
+ find_contour, /* 最初の輪郭へのポインタ */
+ -1,
+ green, /* 外側輪郭線の色 */
+ m_nLineThickness, /* 描画される輪郭線の太さ */
+ 8,
+ hierarchy,
+ 2
+ );
+
+ cv::drawContours(
+ contourImage, /* 輪郭を描画する画像 */
+ find_contour, /* 最初の輪郭へのポインタ */
+ -1,
+ red, /* 外側輪郭線の色 */
+ m_nLineThickness, /* 描画される輪郭線の太さ */
+ 8,
+ hierarchy,
+ 1
+ );
+
+
+
+
/* 画像データのサイズ取得 */
- int len = contourImage->nChannels * contourImage->width * contourImage->height;
+ int len = contourImage.channels() * contourImage.size().width * contourImage.size().height;
m_image_contour.pixels.length(len);
/* 変転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_contour.pixels[0]), contourImage->imageData, len);
+ memcpy((void *)&(m_image_contour.pixels[0]), contourImage.data, len);
/* 変転した画像データをOutPortから出力 */
m_image_contourOut.write();
- /* 抽出された輪郭を解放 */
- cvReleaseMemStorage( &storage );
+
}
return RTC::RTC_OK;
}
Modified: trunk/ImageProcessing/opencv/components/Flip/include/Flip/Flip.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Flip/include/Flip/Flip.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Flip/include/Flip/Flip.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -21,9 +21,7 @@
/* OpenCV用インクルードファイルのインクルード */
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -280,8 +278,8 @@
// <rtc-template block="private_operation">
// </rtc-template>
- IplImage* m_image_buff;
- IplImage* m_flip_image_buff;
+ cv::Mat m_imageBuff;
+ cv::Mat m_flipImageBuff;
};
Modified: trunk/ImageProcessing/opencv/components/Flip/src/Flip.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Flip/src/Flip.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Flip/src/Flip.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -109,10 +109,8 @@
RTC::ReturnCode_t Flip::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの初期化 */
- m_image_buff = NULL;
- m_flip_image_buff = NULL;
+
/* OutPortの画面サイズの初期化 */
m_image_flip.width = 0;
m_image_flip.height = 0;
@@ -123,12 +121,12 @@
RTC::ReturnCode_t Flip::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL)
- {
- /* イメージ用メモリの解放 */
- cvReleaseImage(&m_image_buff);
- cvReleaseImage(&m_flip_image_buff);
- }
+ if (!m_imageBuff.empty())
+ {
+ // 画像用メモリの解放
+ m_imageBuff.release();
+ m_flipImageBuff.release();
+ }
return RTC::RTC_OK;
}
@@ -136,47 +134,41 @@
RTC::ReturnCode_t Flip::onExecute(RTC::UniqueId ec_id)
{
- /* 新しいデータのチェック */
- if (m_image_origIn.isNew()) {
- /* InPortデータの読み込み */
- m_image_origIn.read();
-
- /* InPortとOutPortの画面サイズ処理およびイメージ用メモリの確保 */
- if( m_image_orig.width != m_image_flip.width || m_image_orig.height != m_image_flip.height)
- {
- m_image_flip.width = m_image_orig.width;
- m_image_flip.height = m_image_orig.height;
-
- /* InPortのイメージサイズが変更された場合 */
- if(m_image_buff != NULL)
- {
- cvReleaseImage(&m_image_buff);
- cvReleaseImage(&m_flip_image_buff);
- }
-
- /* イメージ用メモリの確保 */
- m_image_buff = cvCreateImage(cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3);
- m_flip_image_buff = cvCreateImage(cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3);
- }
-
- /* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
-
- /* InPortからの画像データを反転する。 m_flip_mode 0: X軸周り, 1: Y軸周り, -1: 両方の軸周り */
- cvFlip(m_image_buff, m_flip_image_buff, m_flip_mode);
-
- /* 画像データのサイズ取得 */
- int len = m_flip_image_buff->nChannels * m_flip_image_buff->width * m_flip_image_buff->height;
- m_image_flip.pixels.length(len);
-
- /* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_flip.pixels[0]),m_flip_image_buff->imageData,len);
-
- /* 反転した画像データをOutPortから出力する。 */
- m_image_flipOut.write();
- }
-
- return RTC::RTC_OK;
+ // 新しいデータのチェック
+ if (m_image_origIn.isNew()) {
+ // InPortデータの読み込み
+ m_image_origIn.read();
+
+ // InPortとOutPortの画面サイズ処理およびイメージ用メモリの確保
+ if (m_image_orig.width != m_image_flip.width || m_image_orig.height != m_image_flip.height)
+ {
+ m_image_flip.width = m_image_orig.width;
+ m_image_flip.height = m_image_orig.height;
+
+ m_imageBuff.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+ m_flipImageBuff.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+
+
+ }
+
+ // InPortの画像データをm_imageBuffにコピー
+ memcpy(m_imageBuff.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
+
+ // InPortからの画像データを反転する。 m_flipMode 0: X軸周り, 1: Y軸周り, -1: 両方の軸周り
+ cv::flip(m_imageBuff, m_flipImageBuff, m_flip_mode);
+
+ // 画像データのサイズ取得
+ int len = m_flipImageBuff.channels() * m_flipImageBuff.cols * m_flipImageBuff.rows;
+ m_image_flip.pixels.length(len);
+
+ // 反転した画像データをOutPortにコピー
+ memcpy((void *)&(m_image_flip.pixels[0]), m_flipImageBuff.data, len);
+
+ // 反転した画像データをOutPortから出力する。
+ m_image_flipOut.write();
+ }
+
+ return RTC::RTC_OK;
}
/*
Modified: trunk/ImageProcessing/opencv/components/Histogram/include/Histogram/Histogram.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Histogram/include/Histogram/Histogram.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Histogram/include/Histogram/Histogram.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
//OpenCV header file include
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
#define DIMENSIONS 1 /* ヒストグラムの次元数 */
#define UNIFORM 1 /* 一様性に関するフラグ */
@@ -299,14 +297,14 @@
// <rtc-template block="private_operation">
// </rtc-template>
- IplImage* imageBuff; /* カメラのイメージ */
- IplImage* grayImage;
- IplImage* destinationImage;
- IplImage* histogramImage;
- IplImage* histogramBarImage;
+ cv::Mat imageBuff; /* カメラのイメージ */
+ cv::Mat grayImage;
+ cv::Mat destinationImage;
+ cv::Mat histogramImage;
+ cv::Mat histogramBarImage;
- CvHistogram *histogram;
- CvMat* lookUpTableMatrix; /* 濃度対応行列 */
+ cv::MatND histogram;
+ cv::Mat lookUpTableMatrix; /* 濃度対応行列 */
int histogramSize;
int bin_w;
Modified: trunk/ImageProcessing/opencv/components/Histogram/src/Histogram.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Histogram/src/Histogram.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Histogram/src/Histogram.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -115,13 +115,8 @@
RTC::ReturnCode_t Histogram::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの初期化 */
- imageBuff = NULL;
- grayImage = NULL;
- destinationImage = NULL;
- histogramImage = NULL;
- histogramBarImage = NULL;
+
/* OutPort画面サイズの初期化 */
m_image_histogram.width = 0;
m_image_histogram.height = 0;
@@ -129,22 +124,31 @@
m_image_histogramImage.height = 0;
/* ヒストグラムに描画される縦棒の数 */
+ //histogramSize = 128;
+ /* ヒストグラムの範囲 */
+ //range_0[0] = 0;
+ //range_0[1] = 256;
+
+ /* ヒストグラム各次元の範囲を示す配列のポインタ */
+ //ranges[0] = range_0 ;
+
+ /* ヒストグラムに描画される縦棒の数 */
histogramSize = 128;
/* ヒストグラムの範囲 */
range_0[0] = 0;
range_0[1] = 256;
- /* ヒストグラム各次元の範囲を示す配列のポインタ */
- ranges[0] = range_0 ;
-
/* ヒストグラムを生成 */
- histogram = cvCreateHist( DIMENSIONS, &histogramSize, CV_HIST_ARRAY, ranges, UNIFORM );
+ //histogram = cvCreateHist( DIMENSIONS, &histogramSize, CV_HIST_ARRAY, ranges, UNIFORM );
/* 行列を生成 */
- lookUpTableMatrix = cvCreateMatHeader( 1, 256, CV_8UC1 );
+ lookUpTableMatrix.create(cv::Size(1, 256), CV_8UC1);
+ //lookUpTableMatrix = cvCreateMatHeader( 1, 256, CV_8UC1 );
/* 濃度対応行列に濃度対応表をセット */
- cvSetData( lookUpTableMatrix, lookUpTable, 0 );
+ //lookUpTableMatrix.setTo(lookUpTable);
+
+ //cv::setData( lookUpTableMatrix, lookUpTable, 0 );
return RTC::RTC_OK;
}
@@ -152,16 +156,31 @@
RTC::ReturnCode_t Histogram::onDeactivated(RTC::UniqueId ec_id)
{
- if( imageBuff != NULL )
+
+
+
+ if (!imageBuff.empty())
{
- /* イメージ用メモリの解放 */
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&destinationImage);
- cvReleaseImage(&histogramImage);
- cvReleaseImage(&histogramBarImage);
+ imageBuff.release();
}
+ if (!grayImage.empty())
+ {
+ grayImage.release();
+ }
+ if (!destinationImage.empty())
+ {
+ destinationImage.release();
+ }
+ if (!histogramImage.empty())
+ {
+ histogramImage.release();
+ }
+ if (!histogramBarImage.empty())
+ {
+ histogramBarImage.release();
+ }
+
return RTC::RTC_OK;
}
@@ -180,32 +199,24 @@
m_image_histogram.width = m_image_histogramImage.width = m_image_orig.width;
m_image_histogram.height = m_image_histogramImage.height = m_image_orig.height;
- /* InPortのイメージサイズが変更された場合 */
- if( imageBuff != NULL )
- {
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&destinationImage);
- cvReleaseImage(&histogramImage);
- cvReleaseImage(&histogramBarImage);
- }
+
/* イメージ用メモリの確保 */
- imageBuff = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3 );
- grayImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1 );
- destinationImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1 );
- histogramImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3 );
- histogramBarImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3 );
-
+ imageBuff.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+ grayImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ destinationImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ histogramBarImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+ histogramImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
/* ヒストグラムの縦棒の横幅を計算する */
- bin_w = cvRound( ( double )histogramBarImage->width / histogramSize );
+ bin_w = cvRound( ( double )histogramBarImage.size().width / histogramSize );
+
}
/* InPortの画面データをコピー */
- memcpy(imageBuff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
+ memcpy(imageBuff.data,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
/* RGBからグレースケールに変換 */
- cvCvtColor( imageBuff, grayImage, CV_RGB2GRAY);
+ cv::cvtColor( imageBuff, grayImage, CV_RGB2GRAY);
int brightness = m_brightness - TRACKBAR_MAX_VALUE / 2; // 輝度値
int contrast = m_contrast - TRACKBAR_MAX_VALUE / 2; // コントラスト
@@ -225,7 +236,7 @@
if( v > 255 ){
v = 255;
}
- lookUpTable[i] = ( unsigned char )v;
+ lookUpTableMatrix.at<uchar>(i) = (unsigned char)v;
}
} else {
double delta = -128.0 * contrast / 100.0;
@@ -240,47 +251,65 @@
if( v > 255 ){
v = 255;
}
- lookUpTable[i] = ( unsigned char )v;
+ lookUpTableMatrix.at<uchar>(i) = (unsigned char)v;
}
}
-
+
/* 濃度対応行列を用いた濃度階調変換を行う */
- cvLUT( grayImage, destinationImage, lookUpTableMatrix );
+ cv::LUT(grayImage, lookUpTableMatrix, destinationImage);
/* グレースケールからRGBに変換 */
- cvCvtColor( destinationImage, histogramImage, CV_GRAY2RGB );
+ cv::cvtColor( destinationImage, histogramImage, CV_GRAY2RGB );
/* 画像データのサイズ取得 */
- int len = histogramImage->nChannels * histogramImage->width * histogramImage->height;
+ int len = histogramImage.channels() * histogramImage.size().width * histogramImage.size().height;
m_image_histogramImage.pixels.length(len);
/* 変転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_histogramImage.pixels[0]), histogramImage->imageData,len);
+ memcpy((void *)&(m_image_histogramImage.pixels[0]), histogramImage.data,len);
/* 変転した画像データをOutPortから出力 */
m_image_histogramImageOut.write();
/* 画像のヒストグラムを計算する */
- cvCalcHist( &destinationImage, histogram, ACCUMULATE, NULL );
+ int image_num = 1;
+ int channels[] = { 0 };
+
+
+
+ const float *ranges[] = { range_0 };
+
+
+
+
+ /* ヒストグラム各次元の範囲を示す配列のポインタ */
+
+
+
+ cv::calcHist(&destinationImage, image_num, channels, cv::Mat(), histogram, DIMENSIONS, &histogramSize, ranges);
+
float max_value = 0;
/* ヒストグラム値の最大値を得る */
- cvGetMinMaxHistValue( histogram, NULL, &max_value, NULL, NULL );
+ //cvGetMinMaxHistValue( histogram, NULL, &max_value, NULL, NULL );
/* ヒストグラムを最大値によって正規化する */
- cvConvertScale( histogram->bins, histogram->bins,
- ( ( double )histogramBarImage->height ) / max_value, SCALE_SHIFT );
+ //histogram.convertTo(histogram, CV_32F, (double)histogramBarImage.size().height);
+ cv::normalize(histogram, histogram, (double)histogramBarImage.size().height, 0, cv::NORM_INF);
+ //cvConvertScale( histogram->bins, histogram->bins,
+ //( ( double )histogramBarImage->height ) / max_value, SCALE_SHIFT );
/* ヒストグラム画像を白で初期化する */
- cvSet( histogramBarImage, cvScalarAll( 255 ), NULL );
-
+ histogramBarImage.setTo(cv::Scalar::all(255));
+
+
/* ヒストグラムの縦棒を描画する */
for ( int i = 0; i < histogramSize; i++ ) {
- cvRectangle(
+ cv::rectangle(
histogramBarImage,
- cvPoint( i * bin_w, histogramBarImage->height ),
- cvPoint( ( i + 1 ) * bin_w,histogramBarImage->height - cvRound( cvGetReal1D( histogram->bins, i) ) ),
- cvScalarAll( 0 ),
+ cvPoint( i * bin_w, histogramBarImage.size().height ),
+ cvPoint((i + 1) * bin_w, histogramBarImage.size().height - cvRound(histogram.at<float>(i))),
+ cv::Scalar::all(0),
LINE_THICKNESS,
LINE_TYPE,
SHIFT
@@ -288,11 +317,11 @@
}
/* 画像データのサイズ取得 */
- len = histogramBarImage->nChannels * histogramBarImage->width * histogramBarImage->height;
+ len = histogramBarImage.channels() * histogramBarImage.size().width * histogramBarImage.size().height;
m_image_histogram.pixels.length(len);
/* 変転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_histogram.pixels[0]), histogramBarImage->imageData,len);
+ memcpy((void *)&(m_image_histogram.pixels[0]), histogramBarImage.data,len);
/* 変転した画像データをOutPortから出力 */
m_image_histogramOut.write();
Modified: trunk/ImageProcessing/opencv/components/Houghline/include/Hough/Hough.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Houghline/include/Hough/Hough.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Houghline/include/Hough/Hough.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -21,9 +21,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
//OpenCV header file include
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
/* cvCanny用定数 */
#define APERTURE_SIZE 3 /* Sobelオペレータのサイズ (リファレンス参照) */
@@ -350,14 +348,14 @@
// <rtc-template block="private_operation">
// </rtc-template>
- IplImage* imageBuff;
- IplImage* grayImage;
- IplImage* edgeImage;
- IplImage* hough;
- IplImage* houghImage;
+ cv::Mat imageBuff;
+ cv::Mat grayImage;
+ cv::Mat edgeImage;
+ cv::Mat hough;
+ cv::Mat houghImage;
int len;
- CvSeq *lines;
+
int debug_method; /* configuration切り替え時の確認用 */
int debug_type; /* configuration切り替え時の確認用 */
Modified: trunk/ImageProcessing/opencv/components/Houghline/src/Hough.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Houghline/src/Hough.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Houghline/src/Hough.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -52,7 +52,7 @@
"conf.__constraints__.canny_thresld1", "0<=x<=255",
"conf.__constraints__.canny_thresld2", "0<=x<=255",
"conf.__constraints__.hough_method", "(PROBABILISTIC,STANDARD,MULTI_SCALE)",
- "conf.__constraints__.hough_thresld", "0<=x<=255",
+ "conf.__constraints__.hough_thresld", "1<=x<=255",
"conf.__constraints__.line_type", "(8,4,CV_AA)",
""
};
@@ -141,16 +141,11 @@
RTC::ReturnCode_t Hough::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの初期化 */
- imageBuff = NULL;
- grayImage = NULL;
- edgeImage = NULL;
- hough = NULL;
- houghImage = NULL;
+
m_in_height = 0;
m_in_width = 0;
- lines = NULL;
+
len=0;
debug_method = -1;
@@ -162,16 +157,30 @@
RTC::ReturnCode_t Hough::onDeactivated(RTC::UniqueId ec_id)
{
- if(imageBuff != NULL)
+
+
+ if (!imageBuff.empty())
{
- /* イメージ用メモリの解放 */
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&edgeImage);
- cvReleaseImage(&hough);
- cvReleaseImage(&houghImage);
+ imageBuff.release();
}
+ if (!grayImage.empty())
+ {
+ grayImage.release();
+ }
+ if (!edgeImage.empty())
+ {
+ edgeImage.release();
+ }
+ if (!hough.empty())
+ {
+ hough.release();
+ }
+ if (!houghImage.empty())
+ {
+ houghImage.release();
+ }
+
return RTC::RTC_OK;
}
@@ -190,63 +199,65 @@
m_in_width = m_image_orig.width;
m_in_height = m_image_orig.height;
- /* InPortのイメージサイズが変更された場合 */
- if(imageBuff != NULL)
- {
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&grayImage);
- cvReleaseImage(&edgeImage);
- cvReleaseImage(&hough);
- cvReleaseImage(&houghImage);
- }
+
/* イメージ用メモリの確保 */
- imageBuff = cvCreateImage( cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
- grayImage = cvCreateImage( cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1 );
- edgeImage = cvCreateImage( cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1 );
- hough = cvCreateImage( cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
- houghImage = cvCreateImage( cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3 );
+
+ imageBuff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ grayImage.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ edgeImage.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ hough.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ houghImage.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+
}
/* InPortの画面データをコピー */
- memcpy( imageBuff->imageData, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
+ memcpy( imageBuff.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
/* RGBからグレースケールに変換 */
- cvCvtColor( imageBuff, grayImage, CV_RGB2GRAY );
+ cv::cvtColor( imageBuff, grayImage, CV_RGB2GRAY );
/* ハフ変換に必要なメモリ領域 */
- CvMemStorage *storage = cvCreateMemStorage( 0 );
+ std::vector<cv::Vec4i> storage;
/* エッジ抽出を行う */
- cvCanny( grayImage, edgeImage, m_canny_threshold1, m_canny_threshold2, APERTURE_SIZE );
+ cv::Canny( grayImage, edgeImage, m_canny_threshold1, m_canny_threshold2, APERTURE_SIZE );
/* グレースケールからRGBに変換する */
- cvCvtColor( edgeImage, houghImage, CV_GRAY2RGB );
+ cv::cvtColor( edgeImage, houghImage, CV_GRAY2RGB );
+ std::vector<cv::Vec2f> lines;
+ std::vector<cv::Vec4i> lines_P;
/* ハフ変換により直線の抽出を行う */
int hough_method;
if ( m_hough_method == "PROBABILISTIC" )
{
/* 確率的ハフ変換 */
- hough_method = CV_HOUGH_PROBABILISTIC;
+ //hough_method = CV_HOUGH_PROBABILISTIC;
+ cv::HoughLinesP(edgeImage, lines_P, RHO, THETA, m_hough_threshold, m_hough_param1, m_hough_param2);
}
else if ( m_hough_method == "STANDARD" )
{
/* 標準的ハフ変換 */
- hough_method = CV_HOUGH_STANDARD;
+ //hough_method = CV_HOUGH_STANDARD;
+ cv::HoughLines(edgeImage, lines, RHO, THETA, m_hough_threshold);
}
else
{
/* マルチスケール型の古典的ハフ変換 */
- hough_method = CV_HOUGH_MULTI_SCALE;
+ //hough_method = CV_HOUGH_MULTI_SCALE;
+ cv::HoughLines(edgeImage, lines, RHO, THETA, m_hough_threshold, m_hough_param1, m_hough_param2);
+
}
- if ( hough_method != debug_method )
+ /*if ( hough_method != debug_method )
{
std::cout << "hough_method = " << hough_method << std::endl;
debug_method = hough_method;
- }
- lines = cvHoughLines2( edgeImage, storage, hough_method, RHO, THETA, m_hough_threshold, m_hough_param1, m_hough_param2 );
+ }*/
+
+ //cv::HoughCircles(edgeImage, lines, hough_method, RHO, m_hough_threshold , m_hough_param1, m_hough_param2);
+
/* 抽出された直線を描く */
int line_type;
if ( m_line_type == "CV_AA" )
@@ -257,30 +268,55 @@
{
line_type = atoi( m_line_type.c_str() );
}
- if ( line_type != debug_type )
+ /*if ( line_type != debug_type )
{
std::cout << "line_type = " << line_type << std::endl;
debug_type = line_type;
- }
- for ( int i = 0; i < lines->total; i++ ) {
- CvPoint *line = ( CvPoint* )cvGetSeqElem( lines, i );
- cvLine( houghImage, line[0], line[1], CV_RGB( m_line_color_R, m_line_color_G, m_line_color_B ), m_line_thickness, line_type, SHIFT );
- }
+ }*/
+
+ if (m_hough_method == "PROBABILISTIC")
+ {
+ for (unsigned int i = 0; i < lines_P.size(); i++) {
+ //CvPoint *line = ( CvPoint* )cv::getSeqElem( lines, i );
+ cv::line(houghImage, cv::Point(lines_P[i][0], lines_P[i][1]), cv::Point(lines_P[i][2], lines_P[i][3]),
+ cv::Scalar(m_line_color_R, m_line_color_G, m_line_color_B), m_line_thickness,
+ line_type, SHIFT);
+ }
+ }
+ else
+ {
+ for (unsigned int i = 0; i < lines.size(); i++) {
+ float rho = lines[i][0];
+ float theta = lines[i][1];
+ cv::Point pt1, pt2;
+ double a = cos(theta);
+ double b = sin(theta);
+ double x0 = a*rho;
+ double y0 = b*rho;
+ pt1.x = cvRound(x0 + 1000 * (-b));
+ pt1.y = cvRound(y0 + 1000 * (a));
+ pt2.x = cvRound(x0 - 1000 * (-b));
+ pt2.y = cvRound(y0 - 1000 * (a));
+ //CvPoint *line = ( CvPoint* )cv::getSeqElem( lines, i );
+ cv::line(houghImage, pt1, pt2, cv::Scalar(m_line_color_R, m_line_color_G, m_line_color_B), m_line_thickness,
+ line_type, SHIFT);
+ }
+ }
+
/* 画像データのサイズ取得 */
- len = houghImage->nChannels * houghImage->width * houghImage->height;
+ len = houghImage.channels() * houghImage.size().width * houghImage.size().height;
m_image_hough.pixels.length(len);
- m_image_hough.width = houghImage->width;
- m_image_hough.height = houghImage->height;
+ m_image_hough.width = houghImage.size().width;
+ m_image_hough.height = houghImage.size().height;
/* 反転した画像データをOutPortにコピー */
- memcpy( (void *)&(m_image_hough.pixels[0]), houghImage->imageData, len );
+ memcpy( (void *)&(m_image_hough.pixels[0]), houghImage.data, len );
/* 反転した画像データをOutPortから出力 */
m_image_houghOut.write();
- /* ハフ変換に使用したメモリ解放 */
- cvReleaseMemStorage(&storage);
+
}
return RTC::RTC_OK;
Modified: trunk/ImageProcessing/opencv/components/ImageCalibration/include/ImageCalibration/CalibrationServiceSVC_impl.h
===================================================================
--- trunk/ImageProcessing/opencv/components/ImageCalibration/include/ImageCalibration/CalibrationServiceSVC_impl.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/ImageCalibration/include/ImageCalibration/CalibrationServiceSVC_impl.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -18,10 +18,10 @@
#include <coil/Mutex.h>
/* OpenCVHeadファイルのIncluding */
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
-
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/imgcodecs/imgcodecs.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+
/*!
* @class CalibrationServiceSVC_impl
* Example class implementing IDL interface ImageCalibService::CalibrationService
Modified: trunk/ImageProcessing/opencv/components/ImageCalibration/src/CalibrationServiceSVC_impl.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/ImageCalibration/src/CalibrationServiceSVC_impl.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/ImageCalibration/src/CalibrationServiceSVC_impl.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -138,16 +138,16 @@
cv::Mat gray;
RTC::CameraImage image;
/* チェッカー交点座標を格納する行列 */
- cv::vector<cv::vector<cv::Point2f> > imagePoints(m_current_image_num);
+ std::vector<std::vector<cv::Point2f> > imagePoints(m_current_image_num);
/* チェッカー交点座標と対応する世界座標の値を格納する行列 */
- cv::vector<cv::vector<cv::Point3f> > worldPoints(m_current_image_num);
+ std::vector<std::vector<cv::Point3f> > worldPoints(m_current_image_num);
/* カメラパラメータ行列 */
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
- cv::vector<cv::Mat> rotationVectors;
- cv::vector<cv::Mat> translationVectors;
+ std::vector<cv::Mat> rotationVectors;
+ std::vector<cv::Mat> translationVectors;
/* コーナー位置高精度化のための繰り返し処理の停止基準
* 「反復回数が20回に達する」または「イプシロンが0.001に達する」どちらかの条件を満たした時に終了する
Modified: trunk/ImageProcessing/opencv/components/ImageCalibration/src/ImageCalibration.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/ImageCalibration/src/ImageCalibration.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/ImageCalibration/src/ImageCalibration.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -134,7 +134,7 @@
RTC::ReturnCode_t ImageCalibration::onExecute(RTC::UniqueId ec_id)
{
cv::Mat image, gray;
- cv::vector<cv::Point2f> imagePoints; /* チェッカー交点座標を格納する行列 */
+ std::vector<cv::Point2f> imagePoints; /* チェッカー交点座標を格納する行列 */
/* コーナー位置高精度化のための繰り返し処理の停止基準
* 「反復回数が20回に達する」または「イプシロンが0.001に達する」どちらかの条件を満たした時に終了する
@@ -157,7 +157,7 @@
memcpy(image.data,(void *)&(m_img_orig.pixels[0]), m_img_orig.pixels.length());
/* グレースケールに変換 */
- cv::cvtColor(image, gray, CV_BGR2GRAY);
+ cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
/* プロバイダクラスへデータを渡す */
RTC::CameraImage currentImg;
Modified: trunk/ImageProcessing/opencv/components/ImageSubstraction/include/ImageSubstraction/ImageSubstraction.h
===================================================================
--- trunk/ImageProcessing/opencv/components/ImageSubstraction/include/ImageSubstraction/ImageSubstraction.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/ImageSubstraction/include/ImageSubstraction/ImageSubstraction.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -19,9 +19,7 @@
#include <rtm/idl/ExtendedDataTypesSkel.h>
#include <rtm/idl/InterfaceDataTypesSkel.h>
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
#define NUM_OF_BACKGROUND_FRAMES 50 /* 背景モデルを生成するのに使用する画像の枚数 */
@@ -314,8 +312,23 @@
// <rtc-template block="consumer_declare">
// </rtc-template>
+ void initializeBackgroundModel(int num, cv::Size size, double thre_coefficient);
private:
+ cv::Mat backgroundAverageImage; /* 背景の平均値保存用IplImage */
+ cv::Mat backgroundThresholdImage; /* 背景の閾値保存用IplImage */
+
+ cv::Mat originalImage; /* キャプチャ画像用IplImage */
+ cv::Mat differenceImage; /* 差分画像用IplImage */
+ cv::Mat resultImage;
+ cv::Mat outputImage;
+
+ int ImageSubstraction_count = 0;
+ int mode = DYNAMIC_MODE;
+ int g_temp_w = 0;
+ int g_temp_h = 0;
+
+ static const char* mode_str[2];
// <rtc-template block="private_attribute">
// </rtc-template>
@@ -327,6 +340,7 @@
};
+
extern "C"
{
DLL_EXPORT void ImageSubstractionInit(RTC::Manager* manager);
Modified: trunk/ImageProcessing/opencv/components/ImageSubstraction/src/ImageSubstraction.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/ImageSubstraction/src/ImageSubstraction.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/ImageSubstraction/src/ImageSubstraction.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -43,24 +43,13 @@
};
// </rtc-template>
-int ImageSubstraction_count = 0;
-int mode = DYNAMIC_MODE;
-int g_temp_w = 0;
-int g_temp_h = 0;
-std::string mode_str[2] = {
+const char* ImageSubstraction::mode_str[2] = {
"DYNAMIC_MODE", /* 画素ごとに異なる閾値 */
"CONSTANT_MODE" /* 画像全体で一つの閾値 */
};
-IplImage *backgroundAverageImage = NULL; /* 背景の平均値保存用IplImage */
-IplImage *backgroundThresholdImage = NULL; /* 背景の閾値保存用IplImage */
-IplImage *originalImage; /* キャプチャ画像用IplImage */
-IplImage *differenceImage; /* 差分画像用IplImage */
-IplImage *resultImage;
-IplImage *outputImage;
-
//
// 背景モデルを初期化する
//
@@ -68,61 +57,71 @@
// num : 背景モデルを生成するのに使用する画像の枚数
// size : 画像サイズ
//
-void initializeBackgroundModel( int num, CvSize size, double thre_coefficient ){
+void ImageSubstraction::initializeBackgroundModel(int num, cv::Size size, double thre_coefficient){
int i;
- /* 以前の背景情報があれば破棄 */
- if( backgroundAverageImage != NULL ){
- cvReleaseImage( &backgroundAverageImage );
- }
- if( backgroundThresholdImage != NULL ){
- cvReleaseImage( &backgroundThresholdImage );
- }
+
/* 画像情報蓄積用バッファを確保する */
- IplImage *acc = cvCreateImage( size, IPL_DEPTH_32F, 3 );
- IplImage *acc2 = cvCreateImage( size, IPL_DEPTH_32F, 3 );
+ cv::Mat acc = cv::Mat::zeros(size, CV_32FC3);
+ cv::Mat acc2 = cv::Mat::zeros(size, CV_32FC3);
+
- /* 画像の初期化を行う */
- cvSetZero( acc );
- cvSetZero( acc2 );
+
+
+
/* 画像情報の蓄積 */
printf( "Getting background...\n" );
//IplImage *frameImage;
for( i = 0; i < num; i++ ){
//frameImage = cvQueryFrame( capture );
- cvAcc( originalImage, acc );
- cvSquareAcc( originalImage, acc2 );
+ cv::accumulate(originalImage, acc, cv::noArray());
+ cv::accumulateSquare(originalImage, acc2, cv::noArray());
+
+
+
printf( "%d / %d image\n", i + 1, num );
}
printf( "Completion!\n" );
+
/* cvAddS, cvSubS はあるが cvMulS はないので、cvConvertScale を使う */
- cvConvertScale( acc, acc, 1.0 / num ); /* 平均 */
- cvConvertScale( acc2, acc2, 1.0 / num ); /* 二乗和の平均 */
+ acc.convertTo(acc, CV_32F, 1.0 / num); /* 平均 */
+ acc2.convertTo(acc2, CV_32F, 1.0 / num); /* 二乗和の平均 */
+
+
/* 平均が求まったので backgroundAverageImage に格納する */
- backgroundAverageImage = cvCreateImage( size, IPL_DEPTH_8U, 3 );
- cvConvert( acc, backgroundAverageImage );
+ backgroundAverageImage.create(size, CV_8UC3);
+ acc.convertTo(backgroundAverageImage, CV_8UC3);
+
+
/* 分散を計算する */
- IplImage *dispersion = cvCreateImage( size, IPL_DEPTH_32F, 3 );
- cvMul( acc, acc, acc );
- cvSub( acc2, acc, dispersion );
+ cv::Mat dispersion;
+ dispersion.create(size, CV_32FC3);
+ cv::multiply(acc, acc, acc);
+
+ cv::subtract(acc2, acc, dispersion);
+
+
/* 標準偏差を計算する */
- IplImage *sd = cvCreateImage( size, IPL_DEPTH_32F, 3 );
- cvPow( dispersion, sd, 0.5 );
+ cv::Mat sd;
+ sd.create(size, CV_32FC3);
+
+ cv::pow(dispersion, 0.5, sd);
+
+
- /* 閾値を計算する */
- backgroundThresholdImage = cvCreateImage( size, IPL_DEPTH_8U, 3 );
- cvConvertScale( sd, backgroundThresholdImage, thre_coefficient );
- cvReleaseImage( &acc );
- cvReleaseImage( &acc2 );
- cvReleaseImage( &dispersion );
- cvReleaseImage( &sd );
+
+ /* 閾値を計算する */
+ backgroundThresholdImage.create(size, CV_8UC3);
+ sd.convertTo(backgroundThresholdImage, CV_8U, thre_coefficient);
+
+
}
/*!
@@ -214,13 +213,10 @@
g_temp_w = 0;
g_temp_h = 0;
- originalImage = NULL;
- outputImage = NULL;
- resultImage = NULL;
- differenceImage = NULL;
+
/* 閾値の初期設定を表示 */
- printf( "threshold: %s\n", mode_str[1-mode].c_str() );
+ printf( "threshold: %s\n", mode_str[1-mode] );
return RTC::RTC_OK;
}
@@ -228,18 +224,26 @@
RTC::ReturnCode_t ImageSubstraction::onDeactivated(RTC::UniqueId ec_id)
{
- if(differenceImage != NULL){
- cvReleaseImage(&differenceImage);
+
+
+
+ if (!differenceImage.empty())
+ {
+ differenceImage.release();
}
- if(originalImage != NULL){
- cvReleaseImage(&originalImage);
+ if (!originalImage.empty())
+ {
+ originalImage.release();
}
- if(resultImage != NULL){
- cvReleaseImage(&resultImage);
+ if (!resultImage.empty())
+ {
+ resultImage.release();
}
- if(outputImage != NULL){
- cvReleaseImage(&outputImage);
+ if (!outputImage.empty())
+ {
+ outputImage.release();
}
+
return RTC::RTC_OK;
}
@@ -254,15 +258,18 @@
if( m_cont_mode == 'b' )
{
- /* 'b'キーが押されたらその時点での画像を背景画像とする */
- initializeBackgroundModel( NUM_OF_BACKGROUND_FRAMES, cvSize(m_img_width, m_img_height), m_thre_coefficient);
+ if (g_temp_w != 0 && g_temp_h != 0)
+ {
+ /* 'b'キーが押されたらその時点での画像を背景画像とする */
+ initializeBackgroundModel(NUM_OF_BACKGROUND_FRAMES, cv::Size(g_temp_w, g_temp_h), m_thre_coefficient);
- printf( "Background image update\n" ); /* 背景情報更新 */
+ printf("Background image update\n"); /* 背景情報更新 */
+ }
} else if( m_cont_mode == 'm' ){
/* 'm'キーが押されたら閾値の設定方法を変更する */
mode = 1 - mode;
- printf( "threshold: %s\n", mode_str[mode].c_str() );
+ printf( "threshold: %s\n", mode_str[mode] );
}
}
@@ -273,36 +280,27 @@
if(g_temp_w != m_img_orig.width || g_temp_h != m_img_orig.height){
- if(originalImage != NULL){
- cvReleaseImage(&originalImage);
+
+ /* cv::Mat::create() reallocates only when size/type differ, so call it
+    unconditionally: the .empty() guard would keep a stale-size buffer on resize */
+ originalImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
- if(originalImage == NULL){
- originalImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3); /* キャプチャ画像用IplImage */
+
+ /* unconditional: create() is a no-op unless size/type changed */
+ outputImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
+
- if(outputImage != NULL){
- cvReleaseImage(&outputImage);
- }
- if(outputImage == NULL){
- outputImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
- }
- memcpy(originalImage->imageData,(void *)&(m_img_orig.pixels[0]), m_img_orig.pixels.length());
+ memcpy(originalImage.data,(void *)&(m_img_orig.pixels[0]), m_img_orig.pixels.length());
- if(differenceImage != NULL){
- cvReleaseImage(&differenceImage);
+
+ /* reallocate on size change as well, not only when empty */
+ differenceImage = originalImage.clone();
+
- if(differenceImage == NULL){
- differenceImage = cvCloneImage(originalImage);
- }
- if(resultImage != NULL){
- cvReleaseImage(&resultImage);
+ /* single-channel, matching the original IPL_DEPTH_8U/1 buffer used as the
+    CV_BGR2GRAY destination; unconditional create() also handles resizes */
+ resultImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC1);
- if(resultImage == NULL){
- resultImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height),IPL_DEPTH_8U, 1);
- }
- initializeBackgroundModel( NUM_OF_BACKGROUND_FRAMES, cvSize(m_img_orig.width, m_img_orig.height) , m_thre_coefficient);
+ initializeBackgroundModel( NUM_OF_BACKGROUND_FRAMES, cv::Size(m_img_orig.width, m_img_orig.height) , m_thre_coefficient);
ImageSubstraction_count = 1;
g_temp_w = m_img_orig.width;
@@ -316,52 +314,50 @@
if(g_temp_w == m_img_orig.width && g_temp_h == m_img_orig.height){
- if(originalImage != NULL){
- cvReleaseImage(&originalImage);
- }
- if(originalImage == NULL){
- originalImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3); /* キャプチャ画像用IplImage */
+ if(originalImage.empty()){
+ originalImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3); /* キャプチャ画像用IplImage */
}
- if(outputImage != NULL){
- cvReleaseImage(&outputImage);
- }
- if(outputImage == NULL){
- outputImage = cvCreateImage(cvSize(m_img_orig.width, m_img_orig.height), IPL_DEPTH_8U, 3);
+ if(outputImage.empty()){
+ outputImage.create(cv::Size(m_img_orig.width, m_img_orig.height), CV_8UC3);
}
- memcpy(originalImage->imageData,(void *)&(m_img_orig.pixels[0]), m_img_orig.pixels.length());
+ memcpy(originalImage.data,(void *)&(m_img_orig.pixels[0]), m_img_orig.pixels.length());
/* 現在の背景との差の絶対値を成分ごとに取る */
- cvAbsDiff( originalImage, backgroundAverageImage, differenceImage );
+ cv::absdiff(originalImage, backgroundAverageImage, differenceImage);
/* Sub はマイナスになったら0に切り詰めてくれる */
if( mode == DYNAMIC_MODE ){
- cvSub( differenceImage, backgroundThresholdImage, differenceImage );
+ cv::subtract(differenceImage, backgroundThresholdImage, differenceImage);
} else{
- cvSubS( differenceImage, cvScalarAll( m_constant_thre ), differenceImage );
+ cv::subtract(differenceImage, cv::Scalar::all(m_constant_thre), differenceImage);
}
/* differenceImage の要素が1つでも0以上だったら前景 */
- cvCvtColor( differenceImage, resultImage, CV_BGR2GRAY );
- cvThreshold( resultImage, resultImage, 0, 255, CV_THRESH_BINARY );
+ cv::cvtColor(differenceImage, resultImage, cv::COLOR_BGR2GRAY);
+ cv::threshold(resultImage, resultImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
/* メディアンフィルタでノイズを除去する */
- cvSmooth( resultImage, resultImage, CV_MEDIAN );
+ cv::medianBlur(resultImage, resultImage, 7);
- IplImage *tmp = cvCloneImage( differenceImage );
- cvConvertScale( tmp, tmp, 3 );
+ cv::Mat tmp = differenceImage.clone();
+ tmp.convertTo(tmp, CV_8U, 3);
//showFlipImage( windowNameThreshold, tmp );
+ std::vector<cv::Mat> tmp_vec;
+ tmp_vec.push_back(resultImage);
+ tmp_vec.push_back(resultImage);
+ tmp_vec.push_back(resultImage);
- cvMerge( resultImage, resultImage, resultImage, NULL, outputImage );
+ cv::merge(tmp_vec, outputImage);
/* 画像データのサイズ取得 */
- double len1 = (originalImage->nChannels * originalImage->width * originalImage->height);
- double len2 = (outputImage->nChannels * outputImage->width * outputImage->height);
- double len3 = (backgroundAverageImage->nChannels * backgroundAverageImage->width * backgroundAverageImage->height);
- double len4 = (tmp->nChannels * tmp->width * tmp->height);
+ double len1 = (originalImage.channels() * originalImage.size().width * originalImage.size().height);
+ double len2 = (outputImage.channels() * outputImage.size().width * outputImage.size().height);
+ double len3 = (backgroundAverageImage.channels() * backgroundAverageImage.size().width * backgroundAverageImage.size().height);
+ double len4 = (tmp.channels() * tmp.size().width * tmp.size().height);
m_img_capture.pixels.length(len1);
m_img_result.pixels.length(len2);
@@ -369,32 +365,29 @@
m_img_threshold.pixels.length(len4);
/* 該当のイメージをMemCopyする */
- memcpy((void *)&(m_img_capture.pixels[0]), originalImage->imageData, len1);
- memcpy((void *)&(m_img_result.pixels[0]), outputImage->imageData, len2);
- memcpy((void *)&(m_img_back.pixels[0]), backgroundAverageImage->imageData, len3);
- memcpy((void *)&(m_img_threshold.pixels[0]), tmp->imageData, len4);
+ memcpy((void *)&(m_img_capture.pixels[0]), originalImage.data, len1);
+ memcpy((void *)&(m_img_result.pixels[0]), outputImage.data, len2);
+ memcpy((void *)&(m_img_back.pixels[0]), backgroundAverageImage.data, len3);
+ memcpy((void *)&(m_img_threshold.pixels[0]), tmp.data, len4);
- m_img_capture.width = originalImage->width;
- m_img_capture.height = originalImage->height;
+ m_img_capture.width = originalImage.size().width;
+ m_img_capture.height = originalImage.size().height;
- m_img_result.width = originalImage->width;
- m_img_result.height = originalImage->height;
+ m_img_result.width = originalImage.size().width;
+ m_img_result.height = originalImage.size().height;
- m_img_back.width = originalImage->width;
- m_img_back.height = originalImage->height;
+ m_img_back.width = originalImage.size().width;
+ m_img_back.height = originalImage.size().height;
- m_img_threshold.width = originalImage->width;
- m_img_threshold.height = originalImage->height;
+ m_img_threshold.width = originalImage.size().width;
+ m_img_threshold.height = originalImage.size().height;
m_img_captureOut.write();
m_img_resultOut.write();
m_img_backOut.write();
m_img_thresholdOut.write();
- cvReleaseImage( &tmp );
- cvReleaseImage(&originalImage);
- cvReleaseImage(&outputImage);
g_temp_w = m_img_orig.width;
g_temp_h = m_img_orig.height;
Modified: trunk/ImageProcessing/opencv/components/MFCamera/include/MFCamera/MFCapture.h
===================================================================
--- trunk/ImageProcessing/opencv/components/MFCamera/include/MFCamera/MFCapture.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/MFCamera/include/MFCamera/MFCapture.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -7,9 +7,7 @@
#include "MFUtils.h"
-#include<opencv/cv.h>
-#include<opencv/cxcore.h>
-#include<opencv/highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
enum FMT_Values {
FMT_RGB24, FMT_YUY2, FMT_UNKNOWN=256
@@ -291,7 +289,7 @@
* The image data will store in image_buf(IplImage).
* If the video format is 'YUY2', the date will convert to RGB24 format.
*/
- IplImage *MFCapture::GetBufferData();
+ cv::Mat MFCapture::GetBufferData();
/*
* Enumarate available capture device.
@@ -404,7 +402,7 @@
INT32 image_stride;
UINT32 image_length;
UINT32 image_width, image_height;
- IplImage *image_buf;
+ cv::Mat image_buf;
/*
* List of VideoFormat
Modified: trunk/ImageProcessing/opencv/components/MFCamera/src/MFCamera.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/MFCamera/src/MFCamera.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/MFCamera/src/MFCamera.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -196,19 +196,19 @@
std::cout << "Format = "<< m_vfmt_id << std::endl;
}
- if(!m_capture->image_buf)
+ if(m_capture->image_buf.empty())
{
std::cout << "No image buffer..." << std::endl;
return RTC::RTC_ERROR;
}
- if(m_capture->GetBufferData())
+ if (!m_capture->GetBufferData().empty())
{
m_out.pixels.length(m_capture->image_length);
- m_out.width = m_capture->image_buf->width;
- m_out.height = m_capture->image_buf->height;
+ m_out.width = m_capture->image_buf.size().width;
+ m_out.height = m_capture->image_buf.size().height;
- memcpy((void *)&(m_out.pixels[0]), m_capture->image_buf->imageData, m_capture->image_length);
+ memcpy((void *)&(m_out.pixels[0]), m_capture->image_buf.data, m_capture->image_length);
m_outOut.write();
}
Modified: trunk/ImageProcessing/opencv/components/MFCamera/src/MFCapture.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/MFCamera/src/MFCapture.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/MFCamera/src/MFCapture.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -12,10 +12,8 @@
* Constructor
*/
MFCapture::MFCapture() : nDevs(0), devId(-1), image_width(0), image_height(0),
- image_buf(NULL), pSource(NULL), pReader(NULL), pAttributes(NULL),
- ppDevices(NULL), pType(NULL), image_format(MFVideoFormat_RGB24), YUV_Matrix(0),
- outMediaTypes(NULL),numOutMediaTypes(0),image_stride(0),
- image_length(0),pMFT(NULL),pHandler(NULL)
+ image_format(MFVideoFormat_RGB24), YUV_Matrix(0),
+ numOutMediaTypes(0),image_stride(0),image_length(0)
{
// Create an attribute store to specify the enumeration parameters.
hr = MFCreateAttributes(&pAttributes, 1);
@@ -70,7 +68,7 @@
}
CoTaskMemFree(ppDevices);
SafeRelease(&pSource);
- if (image_buf != NULL) { cvReleaseImage(&image_buf); }
+
}
/*
@@ -261,8 +259,9 @@
void
MFCapture::CreateImageBuffer(UINT32 depth, UINT32 channels)
{
- if (image_buf != NULL) { cvReleaseImage(&image_buf); }
- image_buf = cvCreateImage(cvSize(image_width, image_height), depth, channels);
+
+ image_buf.create(cv::Size(image_width, image_height), CV_MAKETYPE(depth, channels));
+
image_length = image_width * image_height * channels;
return;
@@ -319,7 +318,7 @@
/*
* Get Captured Image
*/
-IplImage *
+cv::Mat
MFCapture::GetBufferData()
{
IMFMediaBuffer *buff=NULL;
@@ -341,13 +340,13 @@
hr = sample->GetBufferByIndex(0, &buff);
if (FAILED(hr))
{
- return NULL;
+ return cv::Mat();
}
hr = buff->Lock(&memory, &maxLen, &curLen);
if (FAILED(hr))
{
- return NULL;
+ return cv::Mat();
}
UINT32 size = image_length;
@@ -355,11 +354,11 @@
#if 1
if(image_format == MFVideoFormat_YUY2){
/// If the captured image format is YUY2, convert to RGB24
- YUY2_to_RGB((char *)memory, (char *)image_buf->imageData,
- image_buf->width, image_buf->height, size, YUV_Matrix);
+ YUY2_to_RGB((char *)memory, (char *)image_buf.data,
+ image_buf.size().width, image_buf.size().height, size, YUV_Matrix);
}else{ // Should be MFVideoFormat_RGB24
- memcpy(image_buf->imageData, (void *)memory, size);
+ memcpy(image_buf.data, (void *)memory, size);
}
#else
memcpy(image_buf->imageData, (void *)memory, size);
@@ -371,7 +370,7 @@
return image_buf;
}
- return NULL;
+ return cv::Mat();
}
/*
Modified: trunk/ImageProcessing/opencv/components/ObjectTracking/include/ObjectTracking/ObjectTracking.h
===================================================================
--- trunk/ImageProcessing/opencv/components/ObjectTracking/include/ObjectTracking/ObjectTracking.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/ObjectTracking/include/ObjectTracking/ObjectTracking.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -19,10 +19,11 @@
#include <rtm/idl/ExtendedDataTypesSkel.h>
#include <rtm/idl/InterfaceDataTypesSkel.h>
-#include <cv.h>
-#include <cvaux.h>
-#include <highgui.h>
+#include <opencv2/video/tracking.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
+
#define SEGMENT 150 /* cvSnakeImageで用いる制御点の数 */
#define WINDOW_WIDTH 17 /* cvSnakeImageで最小値を探索する近傍領域の幅 */
#define WINDOW_HEIGHT 17 /* cvSnakeImageで最小値を探索する近傍領域の高さ */
@@ -302,8 +303,48 @@
// <rtc-template block="consumer_declare">
// </rtc-template>
+ void on_mouse(int event, int x, int y, int flags, void* param);
+ cv::Scalar hsv2rgb(float hue);
+ void CalculateHist(cv::MatND &hist, cv::Mat &hsvImage, cv::Mat &maskImage, cv::Rect &selection);
private:
+ cv::Mat inputImage; /* 入力されたIplImage */
+ cv::Mat resultImage; /* 処理結果表示用IplImage */
+ cv::Mat hsvImage; /* HSV表色系用IplImage */
+ cv::Mat hueImage; /* HSV表色系のHチャンネル用IplImage */
+ cv::Mat maskImage; /* マスク画像用IplImage */
+ cv::Mat backprojectImage; /* バックプロジェクション画像用IplImage */
+ cv::Mat histImage; /* ヒストグラム描画用IplImage */
+ cv::Mat grayImage; /* グレースケール画像用IplImage */
+
+ cv::MatND hist; /* ヒストグラム処理用構造体 */
+
+ cv::Mat frameImage; /* キャプチャ画像用IplImage */
+ cv::VideoCapture capture; /* キー入力結果を格納する変数 */
+ int count = 0;
+ int g_temp_w = 0;
+ int g_temp_h = 0;
+
+ /* 処理モード選択用フラグ */
+ int backprojectMode = HIDDEN_BACKPROJECTION;
+ int selectObject = SELECT_OFF;
+ int trackObject = TRACKING_STOP;
+ int showHist = SHOW_HISTOGRAM;
+
+ /* CamShiftトラッキング用変数 */
+ cv::Point origin;
+ cv::Rect selection;
+ cv::Rect trackWindow;
+ cv::RotatedRect trackRegion;
+ cv::TermCriteria trackComp;
+
+ /* ヒストグラム用変数 */
+ int hdims = H_DIMENSION; /* ヒストグラムの次元数 */
+ static const float hRangesArray[2]; /* ヒストグラムのレンジ */
+
+ int vmin = V_MIN;
+ int vmax = V_MAX;
+
// <rtc-template block="private_attribute">
// </rtc-template>
Modified: trunk/ImageProcessing/opencv/components/ObjectTracking/src/ObjectTracking.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/ObjectTracking/src/ObjectTracking.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/ObjectTracking/src/ObjectTracking.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -9,6 +9,8 @@
#include "ObjectTracking.h"
+const float ObjectTracking::hRangesArray[2] = { H_RANGE_MIN, H_RANGE_MAX }; /* ヒストグラムのレンジ */
+
// Module specification
// <rtc-template block="module_spec">
static const char* objecttracking_spec[] =
@@ -28,43 +30,7 @@
};
// </rtc-template>
-IplImage *inputImage = NULL; /* 入力されたIplImage */
-IplImage *resultImage = NULL; /* 処理結果表示用IplImage */
-IplImage *hsvImage = NULL; /* HSV表色系用IplImage */
-IplImage *hueImage = NULL; /* HSV表色系のHチャンネル用IplImage */
-IplImage *maskImage = NULL; /* マスク画像用IplImage */
-IplImage *backprojectImage = NULL; /* バックプロジェクション画像用IplImage */
-IplImage *histImage = NULL; /* ヒストグラム描画用IplImage */
-IplImage *grayImage = NULL; /* グレースケール画像用IplImage */
-CvHistogram *hist = NULL; /* ヒストグラム処理用構造体 */
-
-IplImage *frameImage; /* キャプチャ画像用IplImage */
-CvCapture *capture; /* キー入力結果を格納する変数 */
-int count = 0;
-int g_temp_w = 0;
-int g_temp_h = 0;
-
-/* 処理モード選択用フラグ */
-int backprojectMode = HIDDEN_BACKPROJECTION;
-int selectObject = SELECT_OFF;
-int trackObject = TRACKING_STOP;
-int showHist = SHOW_HISTOGRAM;
-
-/* CamShiftトラッキング用変数 */
-CvPoint origin;
-CvRect selection;
-CvRect trackWindow;
-CvBox2D trackRegion;
-CvConnectedComp trackComp;
-
-/* ヒストグラム用変数 */
-int hdims = H_DIMENSION; /* ヒストグラムの次元数 */
-float hRangesArray[] = {H_RANGE_MIN, H_RANGE_MAX}; /* ヒストグラムのレンジ */
-float *hRanges = hRangesArray;
-int vmin = V_MIN;
-int vmax = V_MAX;
-
//
// マウスドラッグによって初期追跡領域を指定する
//
@@ -75,16 +41,16 @@
// flags : 本プログラムでは未使用
// param : 本プログラムでは未使用
//
-static void on_mouse( int event, int x, int y, int flags, void* param ){
+void ObjectTracking::on_mouse(int event, int x, int y, int flags, void* param){
/* 画像が取得されていなければ、処理を行わない */
- if( resultImage == NULL ){
+ if( resultImage.empty() ){
return;
}
/* 原点の位置に応じてyの値を反転(画像の反転ではない) */
- if( resultImage->origin == 1 ){
- y = resultImage->height - y;
- }
+ //if( resultImage->origin == 1 ){
+ // y = resultImage.size().height - y;
+ //}
/* マウスの左ボタンが押されていれば以下の処理を行う */
if( selectObject == SELECT_ON ){
selection.x = MIN( x, origin.x );
@@ -94,8 +60,8 @@
selection.x = MAX( selection.x, 0 );
selection.y = MAX( selection.y, 0 );
- selection.width = MIN( selection.width, resultImage->width );
- selection.height = MIN( selection.height, resultImage->height );
+ selection.width = MIN( selection.width, resultImage.size().width );
+ selection.height = MIN(selection.height, resultImage.size().height);
selection.width = selection.width - selection.x;
selection.height = selection.height - selection.y;
}
@@ -103,8 +69,8 @@
switch( event ){
case CV_EVENT_LBUTTONDOWN:
/* マウスの左ボタンが押されたのであれば、原点および選択された領域を設定 */
- origin = cvPoint( x, y );
- selection = cvRect( x, y, 0, 0 );
+ origin = cv::Point( x, y );
+ selection = cv::Rect( x, y, 0, 0 );
selectObject = SELECT_ON;
break;
case CV_EVENT_LBUTTONUP:
@@ -126,26 +92,23 @@
// 戻り値:
// CvScalar: RGBの色情報がBGRの順で格納されたコンテナ
//
-CvScalar hsv2rgb( float hue ){
- IplImage *rgbValue, *hsvValue;
- rgbValue = cvCreateImage( cvSize(1,1), IPL_DEPTH_8U, 3 );
- hsvValue = cvCreateImage( cvSize(1,1), IPL_DEPTH_8U, 3 );
+cv::Scalar ObjectTracking::hsv2rgb(float hue){
+ cv::Mat rgbValue, hsvValue;
+ rgbValue.create(cv::Size(1, 1), CV_8UC3);
+ hsvValue.create(cv::Size(1, 1), CV_8UC3);
- hsvValue->imageData[0] = hue; /* 色相値H */
- hsvValue->imageData[1] = 255; /* 彩度値S */
- hsvValue->imageData[2] = 255; /* 明度値V */
+ hsvValue.data[0] = hue; /* 色相値H */
+ hsvValue.data[1] = 255; /* 彩度値S */
+ hsvValue.data[2] = 255; /* 明度値V */
/* HSV表色系をRGB表色系に変換する */
- cvCvtColor( hsvValue, rgbValue, CV_HSV2BGR );
+ cv::cvtColor( hsvValue, rgbValue, CV_HSV2BGR );
- return cvScalar( (unsigned char)rgbValue->imageData[0],
- (unsigned char)rgbValue->imageData[1],
- (unsigned char)rgbValue->imageData[2],
- 0 );
- /* メモリを解放する */
- cvReleaseImage( &rgbValue );
- cvReleaseImage( &hsvValue );
+ return cv::Scalar((unsigned char)rgbValue.data[0], (unsigned char)rgbValue.data[1],
+ (unsigned char)rgbValue.data[2], 0);
+
+
}
@@ -158,63 +121,101 @@
// maskImage : マスク画像用IplImage
// selection : マウスで選択された矩形領域
//
-void CalculateHist( CvHistogram *hist, IplImage *hsvImage, IplImage *maskImage, CvRect selection ){
+void ObjectTracking::CalculateHist(cv::MatND &hist, cv::Mat &hsvImage, cv::Mat &maskImage, cv::Rect &selection){
int i;
int binW; /* ヒストグラムの各ビンの、画像上での幅 */
int val; /* ヒストグラムの頻度 */
- float maxVal; /* ヒストグラムの最大頻度 */
+ double maxVal; /* ヒストグラムの最大頻度 */
/* hsv画像の各画素が値の範囲内に入っているかチェックし、 */
/* マスク画像maskImageを作成する */
- cvInRangeS( hsvImage,
- cvScalar( H_RANGE_MIN, S_MIN, MIN(V_MIN,V_MAX), 0 ),
- cvScalar( H_RANGE_MAX, S_MAX, MAX(V_MIN,V_MAX), 0 ),
+ cv::inRange(hsvImage,
+ cv::Scalar( H_RANGE_MIN, S_MIN, MIN(V_MIN,V_MAX), 0 ),
+ cv::Scalar( H_RANGE_MAX, S_MAX, MAX(V_MIN,V_MAX), 0 ),
maskImage );
/* hsvImageのうち、とくに必要なHチャンネルをhueImageとして分離する */
- cvSplit( hsvImage, hueImage, 0, 0, 0 );
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(hueImage);
+ tmp.push_back(cv::Mat());
+ tmp.push_back(cv::Mat());
+ cv::split(hsvImage, tmp);
+
/* trackObjectがTRACKING_START状態なら、以下の処理を行う */
if( trackObject == TRACKING_START ){
/* 追跡領域のヒストグラム計算とhistImageへの描画 */
maxVal = 0.0;
- cvSetImageROI( hueImage, selection );
- cvSetImageROI( maskImage, selection );
+ //hueImage.adjustROI(selection.y + selection.size().height, selection.y, selection.x, selection.x + selection.size().width);
+ //maskImage.adjustROI(selection.y + selection.size().height, selection.y, selection.x, selection.x + selection.size().width);
+
+ cv::Mat hueImage_roi = hueImage(selection);
+ cv::Mat maskImage_roi = maskImage(selection);
+
/* ヒストグラムを計算し、最大値を求める */
- cvCalcHist( &hueImage, hist, 0, maskImage );
- cvGetMinMaxHistValue( hist, 0, &maxVal, 0, 0 );
+ int hbins = 30, sbins = 32;
+ //int histSize[] = { hbins, sbins };
+ int histSize = 128;
+ float hranges[] = { 0, 180 };
+ float sranges[] = { 0, 256 };
+ const float* ranges[] = { hranges, sranges };
+ int image_num = 1;
+ int channels[] = { 0 };
+
+
+
+ cv::calcHist(&hueImage_roi, image_num, channels, maskImage_roi, hist, 1, &histSize, ranges);
+
+
+ cv::minMaxLoc(hist, 0, &maxVal, 0, 0);
+
/* ヒストグラムの縦軸(頻度)を0-255のダイナミックレンジに正規化 */
- if( maxVal == 0.0 ){
- cvConvertScale( hist->bins, hist->bins, 0.0, 0 );
+ /*if( maxVal == 0.0 ){
+ hist.convertTo(hist, CV_8U, 0.0, 0);
+ //cvConvertScale( hist->bins, hist->bins, 0.0, 0 );
} else{
- cvConvertScale( hist->bins, hist->bins, 255.0 / maxVal, 0 );
- }
-
+ hist.convertTo(hist, CV_8U, 255.0 / maxVal, 0);
+ //cvConvertScale( hist->bins, hist->bins, 255.0 / maxVal, 0 );
+ }*/
+ normalize(hist, hist, 0, 255, cv::NORM_MINMAX, -1, cv::Mat());
+
/* hue,mask画像に設定されたROIをリセット */
- cvResetImageROI( hueImage );
- cvResetImageROI( maskImage );
trackWindow = selection;
+ //trackWindow &= cv::Rect(0, 0, hueImage_roi.cols, hueImage_roi.rows);
/* trackObjectをTRACKING_NOWにする */
trackObject = TRACKING_NOW;
/* ヒストグラム画像をゼロクリア */
- cvSetZero( histImage );
+ histImage = cv::Mat::zeros(histImage.size(), CV_MAKETYPE(histImage.depth(),histImage.channels()));
+
/* 各ビンの幅を決める */
- binW = histImage->width / hdims;
+ binW = histImage.size().width / histSize;
/* ヒストグラムを描画する */
- for( i = 0; i < hdims; i++ ){
- val = cvRound( cvGetReal1D(hist->bins,i) * histImage->height / 255 );
- CvScalar color = hsv2rgb( i * 180.0 / hdims );
- cvRectangle( histImage,
- cvPoint( i * binW, histImage->height ),
- cvPoint( (i+1) * binW, histImage->height - val ),
+
+
+ //cv::normalize(hist, hist, (double)histImage.size().height);
+ for (i = 0; i < histSize; i++){
+ //val = cvRound(hist.at<float>(i));
+ cv::Scalar color = hsv2rgb( i * 180.0 / hdims );
+ /*cv::rectangle(histImage,
+ cv::Point( i * binW, histImage.size().height ),
+ cv::Point((i + 1) * binW, histImage.size().height - val),
color,
-1,
8,
- 0 );
+ 0 );*/
+
+ cv::rectangle(histImage,
+ cv::Point(i * binW, histImage.size().height),
+ cv::Point((i + 1) * binW, histImage.size().height - hist.at<uchar>(i)),
+ cv::Scalar::all(255), // 矩形の色
+ CV_FILLED // 矩形の枠線の太さ。CV_FILLEDの場合塗りつぶし
+ );
+
}
}
+
}
/*!
@@ -233,6 +234,15 @@
// </rtc-template>
{
+ backprojectMode = HIDDEN_BACKPROJECTION;
+ selectObject = SELECT_OFF;
+ trackObject = TRACKING_STOP;
+ showHist = SHOW_HISTOGRAM;
+ hdims = H_DIMENSION; /* ヒストグラムの次元数 */
+
+
+ vmin = V_MIN;
+ vmax = V_MAX;
}
/*!
@@ -299,288 +309,272 @@
g_temp_w = 0;
g_temp_h = 0;
- inputImage = NULL;
- resultImage = NULL;
- hsvImage = NULL;
- hueImage = NULL;
- maskImage = NULL;
- backprojectImage = NULL;
- grayImage = NULL;
- histImage = NULL;
+
return RTC::RTC_OK;
}
RTC::ReturnCode_t ObjectTracking::onDeactivated(RTC::UniqueId ec_id)
{
- if(inputImage != NULL){
- cvReleaseImage(&inputImage);
+
+
+ if (!inputImage.empty())
+ {
+ inputImage.release();
}
- if(resultImage != NULL){
- cvReleaseImage(&resultImage);
+ if (!resultImage.empty())
+ {
+ resultImage.release();
}
- if(hsvImage != NULL){
- cvReleaseImage(&hsvImage);
+ if (!hsvImage.empty())
+ {
+ hsvImage.release();
}
- if(hueImage != NULL){
- cvReleaseImage(&hueImage);
- }
- if(hueImage != NULL){
- cvReleaseImage(&maskImage);
- }
- if(backprojectImage != NULL){
- cvReleaseImage(&backprojectImage);
- }
- if(grayImage != NULL){
- cvReleaseImage(&grayImage);
- }
- if(histImage != NULL){
- cvReleaseImage(&histImage);
- }
+
return RTC::RTC_OK;
}
RTC::ReturnCode_t ObjectTracking::onExecute(RTC::UniqueId ec_id)
{
- int i;
- int j;
+ int i;
+ int j;
- int x;
- int y;
- int mouse_event;
+ int x;
+ int y;
+ int mouse_event;
- /* Snake用のパラメータ */
- float alpha = 1.0; /* 連続エネルギーの重みパラメータ */
- float beta = 0.5; /* 曲率の重みパラメータ */
- float gamma = 1.5; /* 画像エネルギーの重みパラメータ */
- CvPoint pt[SEGMENT]; /* 制御点の座標 */
- CvSize window; /* 最小値を探索する近傍サイズ */
- window.width = WINDOW_WIDTH;
- window.height = WINDOW_HEIGHT;
- CvTermCriteria crit;
- crit.type = CV_TERMCRIT_ITER; /* 終了条件の設定 */
- crit.max_iter = ITERATION_SNAKE; /* 関数の最大反復数 */
+ /* Snake用のパラメータ */
+ float alpha = 1.0; /* 連続エネルギーの重みパラメータ */
+ float beta = 0.5; /* 曲率の重みパラメータ */
+ float gamma = 1.5; /* 画像エネルギーの重みパラメータ */
+ cv::Point pt[SEGMENT]; /* 制御点の座標 */
+ cv::Size window; /* 最小値を探索する近傍サイズ */
+ window.width = WINDOW_WIDTH;
+ window.height = WINDOW_HEIGHT;
+ cv::TermCriteria crit(cv::TermCriteria::MAX_ITER, ITERATION_SNAKE, 1.0);
- if(m_orig_imgIn.isNew()){
- m_orig_imgIn.read();
-
- /* 各メモリ確保 */
- if(inputImage == NULL){
- inputImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 3);
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&inputImage);
- inputImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 3);
- }
- if(resultImage == NULL){
- resultImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 3);
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&resultImage);
- resultImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 3);
- }
- resultImage->origin = inputImage->origin;
- if(hsvImage == NULL){
- hsvImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 3);
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&hsvImage);
- hsvImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 3);
- }
- if(hueImage == NULL){
- hueImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&hueImage);
- hueImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- if(maskImage == NULL){
- maskImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&maskImage);
- maskImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- if(backprojectImage == NULL){
- backprojectImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&backprojectImage);
- backprojectImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- if(grayImage == NULL){
- grayImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&grayImage);
- grayImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 1);
- }
- /* ヒストグラム構造体の使用を宣言 */
- if(hist == NULL){
- hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hRanges, 1 );
- }
- /* ヒストグラム用の画像を確保し、ゼロクリア */
- if(histImage == NULL){
- histImage = cvCreateImage( cvSize(HISTIMAGE_WIDTH, HISTIMAGE_HEIGHT), IPL_DEPTH_8U, 3 );
- }
- if(g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height){
- cvReleaseImage(&histImage);
- histImage = cvCreateImage(cvSize(m_orig_img.width, m_orig_img.height), IPL_DEPTH_8U, 3);
- }
- cvSetZero( histImage );
+ if (m_orig_imgIn.isNew()){
- /* InPortの映像の取得 */
- memcpy(inputImage->imageData,(void *)&(m_orig_img.pixels[0]),m_orig_img.pixels.length());
+ m_orig_imgIn.read();
- /* キャプチャされた画像をresultImageにコピーし、HSV表色系に変換してhsvImageに格納 */
- cvCopy( inputImage, resultImage, NULL );
- cvCvtColor( resultImage, hsvImage, CV_BGR2HSV );
+ /* 各メモリ確保 */
+ if (inputImage.empty())
+ {
+ inputImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ inputImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ if (resultImage.empty())
+ {
+ resultImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ resultImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ if (hsvImage.empty())
+ {
+ hsvImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ hsvImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ if (hueImage.empty())
+ {
+ hueImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ hueImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ if (maskImage.empty())
+ {
+ maskImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ maskImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ if (backprojectImage.empty())
+ {
+ backprojectImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ backprojectImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ if (grayImage.empty())
+ {
+ grayImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ grayImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC1);
+ }
+ /* ヒストグラム用の画像を確保し、ゼロクリア */
+ if (histImage.empty())
+ {
+ histImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ else if (g_temp_w != m_orig_img.width || g_temp_h != m_orig_img.height)
+ {
+ histImage.create(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
+ }
+ histImage = cv::Mat::zeros(cv::Size(m_orig_img.width, m_orig_img.height), CV_8UC3);
- /* WindowのEvent情報の取得に対する処理 */
- if(m_eventIn.isNew() && m_xIn.isNew() && m_yIn.isNew()){
- m_xIn.read();
- m_yIn.read();
- m_eventIn.read();
- x = m_x.data;
- y = m_y.data;
- mouse_event = m_event.data;
+ //resultImage->origin = inputImage->origin;
- on_mouse(mouse_event, x, y, 0, 0);
- x= 0;
- y= 0;
- mouse_event = 0;
- }
- /* trackObjectフラグがTRACKING_STOP以外なら、以下の処理を行う */
- if( trackObject != TRACKING_STOP ){
- /* 追跡領域のヒストグラム計算と描画 */
- CalculateHist( hist, hsvImage, maskImage, selection );
- /* バックプロジェクションを計算する */
- cvCalcBackProject( &hueImage, backprojectImage, hist );
- /* backProjectionのうち、マスクが1であるとされた部分のみ残す */
- cvAnd( backprojectImage, maskImage, backprojectImage, 0 );
+ /* ヒストグラム構造体の使用を宣言 */
+ if (hist.empty()){
+ hist = cv::Mat::zeros(1, &hdims, CV_8UC1);
+ }
- /* CamShift法による領域追跡を実行する */
- cvCamShift( backprojectImage,
- trackWindow,
- cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
- &trackComp,
- &trackRegion );
- trackWindow = trackComp.rect;
+ /* InPortの映像の取得 */
+ memcpy(inputImage.data, (void *)&(m_orig_img.pixels[0]), m_orig_img.pixels.length());
- /* SnakeImage用のグレースケール画像を作成する */
- cvCvtColor( resultImage, grayImage, CV_BGR2GRAY );
+ /* キャプチャされた画像をresultImageにコピーし、HSV表色系に変換してhsvImageに格納 */
+ inputImage.copyTo(resultImage);
+ cv::cvtColor(resultImage, hsvImage, CV_BGR2HSV);
- if( backprojectMode == SHOW_BACKPROJECTION ){
- cvCvtColor( backprojectImage, resultImage, CV_GRAY2BGR );
- }
- if( resultImage->origin == 1 ){
- trackRegion.angle = -trackRegion.angle;
- }
+ /* WindowのEvent情報の取得に対する処理 */
+ if (m_eventIn.isNew() && m_xIn.isNew() && m_yIn.isNew()){
+ m_xIn.read();
+ m_yIn.read();
+ m_eventIn.read();
- /* CamShiftでの領域追跡結果をSnakeの初期位置に設定する */
- for( i=0; i<SEGMENT; i++ ){
- pt[i].x = cvRound( trackRegion.size.width
- * cos(i * 6.28 / SEGMENT + trackRegion.angle)
- / 2.0 + trackRegion.center.x );
- pt[i].y = cvRound( trackRegion.size.height
- * sin(i * 6.28 / SEGMENT + trackRegion.angle)
- / 2.0 + trackRegion.center.y );
- }
- /* Snakeによる輪郭抽出を実行する */
- for( i=0; i<ITERATION_SNAKE; i++ ){
- cvSnakeImage( grayImage,
- pt,
- SEGMENT,
- &alpha,
- &beta,
- &gamma,
- CV_VALUE,
- window,
- crit,
- 1);
- /* 各輪郭点の間に線をひいて輪郭線を描画する */
- for( j=0; j<SEGMENT; j++ ){
- if( j < SEGMENT-1 ){
- cvLine( resultImage, pt[j], pt[j+1],
- cvScalar(0,0,255,0), 2, 8, 0 );
- }
- else{
- cvLine( resultImage, pt[j], pt[0],
- cvScalar(0,0,255,0), 2, 8, 0 );
- }
- }
- }
- }
+ x = m_x.data;
+ y = m_y.data;
+ mouse_event = m_event.data;
- /* マウスで選択中の初期追跡領域の色を反転させる */
- if( selectObject == SELECT_ON && selection.width > 0 && selection.height > 0 ){
+ on_mouse(mouse_event, x, y, 0, 0);
- cvSetImageROI( resultImage, selection );
- cvXorS( resultImage, cvScalarAll(255), resultImage, 0 );
- cvResetImageROI( resultImage );
- }
- /* backprojectImageの座標原点が左上の場合、上下を反転させる */
- if( backprojectImage->origin == 0 ){
- cvFlip( backprojectImage, backprojectImage, 0 );
- }
+ x = 0;
+ y = 0;
+ mouse_event = 0;
+ }
- /* 画像データのサイズ取得 */
- double len = (resultImage->nChannels * resultImage->width * resultImage->height);
- double leng = (histImage->nChannels * histImage->width * histImage->height);
- m_out_img.pixels.length(len);
- m_hist_img.pixels.length(leng);
+ /* trackObjectフラグがTRACKING_STOP以外なら、以下の処理を行う */
+ if (trackObject != TRACKING_STOP){
+
+ /* 追跡領域のヒストグラム計算と描画 */
+ CalculateHist(hist, hsvImage, maskImage, selection);
- /* 該当のイメージをMemCopyする */
- memcpy((void *)&(m_out_img.pixels[0]), resultImage->imageData, len);
- memcpy((void *)&(m_hist_img.pixels[0]), histImage->imageData, leng);
+ const float *ranges[] = { hRangesArray };
+ int image_num = 1;
+ int channels[] = { 0 };
+ int dnum = 1;
+
+
+ /* バックプロジェクションを計算する */
+ cv::calcBackProject(&hueImage, image_num, channels, hist, backprojectImage, ranges);
+
+ /* backProjectionのうち、マスクが1であるとされた部分のみ残す */
+ cv::bitwise_and(backprojectImage, maskImage, backprojectImage);
+
+ trackComp = cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1);
+
+ /* CamShift法による領域追跡を実行する */
+ trackRegion = cv::CamShift(backprojectImage,
+ trackWindow,
+ trackComp);
+ //cv::ellipse(resultImage, trackRegion, cv::Scalar(0, 0, 255), 3, cv::LINE_AA);
- /* 反転した画像データをOutPortから出力する */
- m_out_img.width = inputImage->width;
- m_out_img.height = inputImage->height;
+ //trackWindow = trackComp.rect;
- m_hist_img.width = inputImage->width;
- m_hist_img.height = inputImage->height;
+ /* SnakeImage用のグレースケール画像を作成する */
+ cv::cvtColor(resultImage, grayImage, cv::COLOR_BGR2GRAY);
- m_out_imgOut.write();
- m_hist_imgOut.write();
+ if (backprojectMode == SHOW_BACKPROJECTION){
+ cv::cvtColor(backprojectImage, resultImage, CV_GRAY2BGR);
+ }
+ //if (resultImage->origin == 1){
+ // trackRegion.angle = -trackRegion.angle;
+ //}
- if(inputImage != NULL){
- cvReleaseImage(&inputImage);
- }
- if(resultImage != NULL){
- cvReleaseImage(&resultImage);
- }
- if(hsvImage != NULL){
- cvReleaseImage(&hsvImage);
- }
- if(hueImage != NULL){
- cvReleaseImage(&hueImage);
- }
- if(hueImage != NULL){
- cvReleaseImage(&maskImage);
- }
- if(backprojectImage != NULL){
- cvReleaseImage(&backprojectImage);
- }
- if(grayImage != NULL){
- cvReleaseImage(&grayImage);
- }
- if(histImage != NULL){
- cvReleaseImage(&histImage);
- }
- }
+ /* CamShiftでの領域追跡結果をSnakeの初期位置に設定する */
+ /*for (i = 0; i < SEGMENT; i++){
+ pt[i].x = cvRound(trackRegion.size.width
+ * cos(i * 6.28 / SEGMENT + trackRegion.angle)
+ / 2.0 + trackRegion.center.x);
+ pt[i].y = cvRound(trackRegion.size.height
+ * sin(i * 6.28 / SEGMENT + trackRegion.angle)
+ / 2.0 + trackRegion.center.y);
+ }*/
+ cv::Mat grayImage_roi = grayImage(trackWindow);
+ cv::Mat resultImage_roi = resultImage(trackWindow);
+ double qualityLevel = 0.01;
+ double minDistance = 10;
+ std::vector<cv::Point2f> corners;
+ int blockSize = 3;
+ bool useHarrisDetector = false;
+ double k = 0.04;
+ /* Snakeによる輪郭抽出を実行する */
+ cv::goodFeaturesToTrack(grayImage_roi, corners, ITERATION_SNAKE, qualityLevel, minDistance, cv::Mat(), blockSize, useHarrisDetector, k);
+ for (unsigned i = 1; i < corners.size(); i++){
- return RTC::RTC_OK;
+
+ /* 各輪郭点の間に線をひいて輪郭線を描画する */
+ cv::line(resultImage_roi, corners[i], corners[i-1],
+ cv::Scalar(0, 0, 255, 0), 2, 8, 0);
+ }
+
+ }
+
+ /* マウスで選択中の初期追跡領域の色を反転させる */
+ if (selectObject == SELECT_ON && selection.width > 0 && selection.height > 0){
+
+ //resultImage = resultImage.adjustROI(selection.y + selection.size().height, selection.y, selection.x, selection.x + selection.size().width);
+ cv::Mat roi = resultImage(selection);
+ cv::bitwise_xor(roi, cv::Scalar::all(255), roi);
+ //cvResetImageROI( resultImage );
+ }
+ /* backprojectImageの座標原点が左上の場合、上下を反転させる */
+ //if( backprojectImage->origin == 0 ){
+ // cv::flip( backprojectImage, backprojectImage, 0 );
+ //}
+
+ /* 画像データのサイズ取得 */
+ /*std::vector<cv::Mat> tmp;
+ tmp.push_back(backprojectImage);
+ tmp.push_back(backprojectImage);
+ tmp.push_back(backprojectImage);
+ cv::merge(tmp, resultImage);*/
+ double len = (resultImage.channels() * resultImage.size().width * resultImage.size().height);
+ double leng = (histImage.channels() * histImage.size().width * histImage.size().height);
+ m_out_img.pixels.length(len);
+ m_hist_img.pixels.length(leng);
+
+ /* 該当のイメージをMemCopyする */
+ memcpy((void *)&(m_out_img.pixels[0]), resultImage.data, len);
+ memcpy((void *)&(m_hist_img.pixels[0]), histImage.data, leng);
+
+ /* 反転した画像データをOutPortから出力する */
+ m_out_img.width = inputImage.size().width;
+ m_out_img.height = inputImage.size().height;
+
+ m_hist_img.width = inputImage.size().width;
+ m_hist_img.height = inputImage.size().height;
+
+ m_out_imgOut.write();
+ m_hist_imgOut.write();
+
+
+ }
+
+ return RTC::RTC_OK;
}
/*
Modified: trunk/ImageProcessing/opencv/components/OpenCVCamera/include/OpenCVCamera/OpenCVCamera.h
===================================================================
--- trunk/ImageProcessing/opencv/components/OpenCVCamera/include/OpenCVCamera/OpenCVCamera.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/OpenCVCamera/include/OpenCVCamera/OpenCVCamera.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -19,9 +19,8 @@
#include <rtm/idl/ExtendedDataTypesSkel.h>
#include <rtm/idl/InterfaceDataTypesSkel.h>
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -298,7 +297,7 @@
// </rtc-template>
int dummy;
int m_device_id; /* 使用中のカメラデバイスID */
- CvCapture* m_capture; /* カメラ用メモリ */
+ cv::VideoCapture m_capture; /* カメラ用メモリ */
};
Modified: trunk/ImageProcessing/opencv/components/OpenCVCamera/src/OpenCVCamera.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/OpenCVCamera/src/OpenCVCamera.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/OpenCVCamera/src/OpenCVCamera.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -91,7 +91,7 @@
// </rtc-template>
m_device_id = -1;
- m_capture = NULL;
+
return RTC::RTC_OK;
}
@@ -121,9 +121,9 @@
RTC::ReturnCode_t OpenCVCamera::onActivated(RTC::UniqueId ec_id)
{
m_device_id = m_device_num;
-
+ m_capture.open(m_device_id);
/* カメラデバイスの探索 */
- if (NULL == (m_capture = cvCaptureFromCAM(m_device_id)))
+ if (!m_capture.isOpened())
{
cout << "No Camera Device" << endl;
return RTC::RTC_ERROR;
@@ -136,10 +136,7 @@
RTC::ReturnCode_t OpenCVCamera::onDeactivated(RTC::UniqueId ec_id)
{
/* カメラ用メモリの解放 */
- if (m_capture != NULL)
- {
- cvReleaseCapture(&m_capture);
- }
+ m_capture.release();
return RTC::RTC_OK;
}
@@ -149,51 +146,56 @@
static coil::TimeValue tmOld;
static int count = 0;
const int DISPLAY_PERIOD_FRAME_NUM = 100;
- IplImage *cam_frame = NULL;
+ cv::Mat cam_frame;
/* 実行中にコンフィグレーションによりデバイスIDが更新された場合 */
if (m_device_num != m_device_id)
{
- cvReleaseCapture(&m_capture);
+ //m_capture.release();
m_device_id = m_device_num;
+ m_capture.open(m_device_id);
/* カメラデバイスの再探索 */
- if (NULL == (m_capture = cvCaptureFromCAM(m_device_id)))
+ if (!m_capture.isOpened())
{
cout << "No Camera Device" << endl;
return RTC::RTC_ERROR;
}
}
+ //m_capture.set(CV_CAP_PROP_FRAME_WIDTH, m_frame_width);
+ //m_capture.set(CV_CAP_PROP_FRAME_HEIGHT, m_frame_height);
+ m_capture.set(CV_CAP_PROP_FPS, m_frame_rate);
+
+ m_capture >> cam_frame;
- cvSetCaptureProperty(m_capture, CV_CAP_PROP_FRAME_WIDTH, m_frame_width);
- cvSetCaptureProperty(m_capture, CV_CAP_PROP_FRAME_HEIGHT, m_frame_height);
- cvSetCaptureProperty(m_capture, CV_CAP_PROP_FPS, m_frame_rate);
-
- cam_frame = cvQueryFrame(m_capture);
- if (NULL == cam_frame)
+ if (cam_frame.empty())
{
cout << "Bad frame or no frame!!" << endl;
return RTC::RTC_ERROR;
}
- IplImage* frame = cvCreateImage(cvGetSize(cam_frame), 8, 3);
-
- if (cam_frame->origin == IPL_ORIGIN_TL)
+ cv::Mat frame;
+ //frame.create(cam_frame.size(), CV_8UC3);
+ frame = cam_frame;
+
+ /*if (cam_frame->origin == IPL_ORIGIN_TL)
{
- cvCopy(cam_frame, frame);
+ frame.copyTo(cam_frame);
} else {
- cvFlip(cam_frame, frame);
- }
+ cv::flip(cam_frame, frame, 0);
+
+ }*/
- int len = frame->nChannels * frame->width * frame->height;
+ int len = frame.channels() * frame.size().width * frame.size().height;
+
/* 画面のサイズ情報を入れる */
m_out.pixels.length(len);
- m_out.width = frame->width;
- m_out.height = frame->height;
+ m_out.width = frame.size().width;
+ m_out.height = frame.size().height;
- memcpy((void *)&(m_out.pixels[0]), frame->imageData, len);
- cvReleaseImage(&frame);
+ memcpy((void *)&(m_out.pixels[0]), frame.data, len);
+
/* 繋がってるコンポーネントがしんでしまうと問題発生 */
m_outOut.write();
Modified: trunk/ImageProcessing/opencv/components/Perspective/include/Perspective/Perspective.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Perspective/include/Perspective/Perspective.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Perspective/include/Perspective/Perspective.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用インクルードファイルのインクルード */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -275,10 +273,10 @@
// </rtc-template>
int m_in_height; /* 入力イメージのHeight */
int m_in_width; /* 入力イメージのWidth */
- IplImage* m_image_buff; /* Original Image */
- IplImage* m_image_dest; /* 結果出力用IplImage */
+ cv::Mat m_image_buff; /* Original Image */
+ cv::Mat m_image_dest; /* 結果出力用IplImage */
- CvMat *m_perspectiveMatrix; /* 変換Matrix */
+ cv::Mat m_perspectiveMatrix; /* 変換Matrix */
};
Modified: trunk/ImageProcessing/opencv/components/Perspective/src/Perspective.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Perspective/src/Perspective.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Perspective/src/Perspective.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -99,15 +99,13 @@
RTC::ReturnCode_t Perspective::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの確保 */
- m_image_buff = NULL;
- m_image_dest = NULL;
+
m_in_height = 0;
m_in_width = 0;
/* 行列を生成する */
- m_perspectiveMatrix = cvCreateMat( 3, 3, CV_32FC1);
+ m_perspectiveMatrix.create(cv::Size(3, 3), CV_32FC1);
return RTC::RTC_OK;
}
@@ -115,13 +113,22 @@
RTC::ReturnCode_t Perspective::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
- cvReleaseMat(&m_perspectiveMatrix);
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
+ }
+ if (!m_image_dest.empty())
+ {
+ m_image_dest.release();
+ }
+ if (!m_perspectiveMatrix.empty())
+ {
+ m_perspectiveMatrix.release();
+ }
+
+
return RTC::RTC_OK;
}
@@ -143,52 +150,67 @@
m_in_height = m_image_orig.height;
m_in_width = m_image_orig.width;
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
+
/* サイズ変換のためTempメモリーを用意する */
- m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_image_dest = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
+ m_image_buff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_dest.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
}
/* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
+ memcpy(m_image_buff.data,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
// Anternative actions
- CvPoint2D32f original[4]; /* 変換前座標 */
- CvPoint2D32f translate[4]; /* 変換後座標 */
+ std::vector<cv::Point2f> original; /* 変換前座標 */
+ std::vector<cv::Point2f> translate; /* 変換後座標 */
+
/* 変換前の座標を設定する */
- original[0] = cvPoint2D32f( 0, 0 );
- original[1] = cvPoint2D32f( m_image_buff->width, 0 );
- original[2] = cvPoint2D32f( 0, m_image_buff->height );
- original[3] = cvPoint2D32f( m_image_buff->width, m_image_buff->height );
+ original.push_back(cv::Point2f(0, 0));
+ original.push_back(cv::Point2f(m_image_buff.size().width, 0));
+ original.push_back(cv::Point2f(0, m_image_buff.size().height));
+ original.push_back(cv::Point2f(m_image_buff.size().width, m_image_buff.size().height));
+
/* 変換後の座標を設定する */
- translate[0] = cvPoint2D32f( m_image_buff->width / 5 * 1, m_image_buff->height / 5 * 2 );
- translate[1] = cvPoint2D32f( m_image_buff->width / 5 * 4, m_image_buff->height / 5 * 2 );
- translate[2] = cvPoint2D32f( 0, m_image_buff->height / 5 * 4 );
- translate[3] = cvPoint2D32f( m_image_buff->width , m_image_buff->height / 5 * 4 );
+ translate.push_back(cv::Point2f(m_image_buff.size().width / 5 * 1, m_image_buff.size().height / 5 * 2));
+ translate.push_back(cv::Point2f(m_image_buff.size().width / 5 * 4, m_image_buff.size().height / 5 * 2));
+ translate.push_back(cv::Point2f(0, m_image_buff.size().height / 5 * 4));
+ translate.push_back(cv::Point2f(m_image_buff.size().width, m_image_buff.size().height / 5 * 4));
+ // 変換前の画像での座標
+ const cv::Point2f src_pt[] = {
+ cv::Point2f(88.0, 81.0),
+ cv::Point2f(111.0, 436.0),
+ cv::Point2f(420.0, 346.0),
+ cv::Point2f(424, 131) };
+
+ // 変換後の画像での座標
+ const cv::Point2f dst_pt[] = {
+ cv::Point2f(0.0, 0.0),
+ cv::Point2f(0, 0 - 1 - 200),
+ cv::Point2f(0 - 1, 0 - 1 - 200),
+ cv::Point2f(0 - 1, 0) };
+
+
/* 変換行列を求める */
- cvGetPerspectiveTransform( original, translate, m_perspectiveMatrix );
+ m_perspectiveMatrix = cv::getPerspectiveTransform(original, translate);
/* 変換行列を反映させる */
- cvWarpPerspective( m_image_buff, m_image_dest, m_perspectiveMatrix,
- CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
+ cv::warpPerspective(m_image_buff, m_image_dest, m_perspectiveMatrix,
+ m_image_dest.size());
+ //CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cv::Scalar::all(255) );
/* 画像データのサイズ取得 */
- int len = m_image_dest->nChannels * m_image_dest->width * m_image_dest->height;
+ int len = m_image_dest.channels() * m_image_dest.size().width * m_image_dest.size().height;
/* 画面のサイズ情報を入れる */
m_image_out.pixels.length(len);
- m_image_out.width = m_image_dest->width;
- m_image_out.height = m_image_dest->height;
+ m_image_out.width = m_image_dest.size().width;
+ m_image_out.height = m_image_dest.size().height;
/* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_out.pixels[0]), m_image_dest->imageData,len);
+ memcpy((void *)&(m_image_out.pixels[0]), m_image_dest.data,len);
/* 反転した画像データをOutPortから出力する */
m_image_outOut.write();
Modified: trunk/ImageProcessing/opencv/components/RockPaperScissors/include/RockPaperScissors/RockPaperScissors.h
===================================================================
--- trunk/ImageProcessing/opencv/components/RockPaperScissors/include/RockPaperScissors/RockPaperScissors.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/RockPaperScissors/include/RockPaperScissors/RockPaperScissors.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,8 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCVHeadファイルのIncluding */
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
#include <string>
#include "Labeling.h"
@@ -343,23 +342,24 @@
void extractSkinColor( void );
void interpolate( void );
int pickupMaxArea( void );
- void createConvexHull( int handarea, CvPoint **handpoint, int **hull,
- CvMat *pointMatrix, CvMat *hullMatrix );
- void drawConvexHull( CvPoint *handpoint, int *hull, int hullcount );
- int calcConvexHullArea( CvPoint *handpoint, int *hull, int hullcount );
+ void createConvexHull(int handarea, cv::Mat &pointMatrix, cv::Mat &hullMatrix );
+ void drawConvexHull(cv::Mat &handpoint, cv::Mat &hull, int hullcount);
+ int calcConvexHullArea(cv::Mat &handpoint, cv::Mat &hull, int hullcount);
void decide( int handarea, int hullarea );
int dummy;
std::string m_prev_judge; /* 前回の判定 グー / チョキ / パー */
- IplImage* m_frame_image;
- IplImage* m_image_buff;
- IplImage* m_output_buff;
- IplImage* m_hsv_buff;
- IplImage* m_convexHull_buff;
- IplImage* m_skin_buff;
- IplImage* m_temp_buff;
- IplImage* m_label_buff;
+ cv::Mat m_frame_image;
+ cv::Mat m_image_buff;
+ cv::Mat m_output_buff;
+ cv::Mat m_hsv_buff;
+ cv::Mat m_convexHull_buff;
+ cv::Mat m_skin_buff;
+ cv::Mat m_temp_buff;
+ cv::Mat m_label_buff;
+
+ cv::VideoCapture capture;
};
Modified: trunk/ImageProcessing/opencv/components/RockPaperScissors/src/RockPaperScissors.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/RockPaperScissors/src/RockPaperScissors.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/RockPaperScissors/src/RockPaperScissors.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -12,8 +12,8 @@
using namespace std;
-CvCapture *capture = NULL;
+
// Module specification
// <rtc-template block="module_spec">
static const char* rockpaperscissors_spec[] =
@@ -63,10 +63,10 @@
return new LabelingBS();
}
-int exec(Label *label,IplImage *target,IplImage *result,
+int exec(Label *label, cv::Mat target, cv::Mat result,
const bool is_sort_region,int region_size_min){
- return label->Exec((unsigned char *)target->imageData,(short *)result->imageData,
- target->width,target->height,is_sort_region,region_size_min);
+ return label->Exec((unsigned char *)target.data,(short *)result.data,
+ target.size().width, target.size().height, is_sort_region, region_size_min);
}
int getNumOfResultRegions(Label *label){
@@ -160,13 +160,7 @@
RTC::ReturnCode_t RockPaperScissors::onActivated(RTC::UniqueId ec_id)
{
- m_image_buff = NULL;
- m_hsv_buff = NULL;
- m_convexHull_buff = NULL;
- m_skin_buff = NULL;
- m_temp_buff = NULL;
- m_label_buff = NULL;
- m_output_buff = NULL;
+
m_prev_judge = "";
return RTC::RTC_OK;
@@ -175,130 +169,146 @@
RTC::ReturnCode_t RockPaperScissors::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL){
- cvReleaseImage(&m_image_buff);
+
+
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
+ }
+ if (!m_hsv_buff.empty())
+ {
+ m_hsv_buff.release();
+ }
+ if (!m_convexHull_buff.empty())
+ {
+ m_convexHull_buff.release();
}
- if(m_hsv_buff != NULL){
- cvReleaseImage(&m_hsv_buff);
+ if (!m_skin_buff.empty())
+ {
+ m_skin_buff.release();
+ }
+ if (!m_temp_buff.empty())
+ {
+ m_temp_buff.release();
+ }
+ if (!m_label_buff.empty())
+ {
+ m_label_buff.release();
}
- if(m_convexHull_buff != NULL){
- cvReleaseImage(&m_convexHull_buff);
+ if (!m_output_buff.empty())
+ {
+ m_output_buff.release();
}
- if(m_skin_buff != NULL){
- cvReleaseImage(&m_skin_buff);
- }
- if(m_temp_buff != NULL){
- cvReleaseImage(&m_temp_buff);
- }
- if(m_label_buff != NULL){
- cvReleaseImage(&m_label_buff);
- }
- if(m_output_buff != NULL){
- cvReleaseImage(&m_output_buff);
- }
return RTC::RTC_OK;
}
RTC::ReturnCode_t RockPaperScissors::onExecute(RTC::UniqueId ec_id)
-{
+{
+
/* 新データのチェック */
if(m_img_inputIn.isNew()){
/* データの読み込み */
m_img_inputIn.read();
- m_image_buff = cvCreateImage(cvSize(m_img_input.width, m_img_input.height), IPL_DEPTH_8U, 3); /* 入力Image */
- m_hsv_buff = cvCreateImage(cvSize(m_img_input.width, m_img_input.height), IPL_DEPTH_8U, 3); /* HSV用 */
- m_convexHull_buff = cvCreateImage(cvSize(m_img_input.width, m_img_input.height), IPL_DEPTH_8U, 3); /* ConvexHull用 */
- m_skin_buff = cvCreateImage(cvSize(m_img_input.width, m_img_input.height), IPL_DEPTH_8U, 1); /* 肌色抽出用 */
- m_temp_buff = cvCreateImage(cvSize(m_img_input.width, m_img_input.height), IPL_DEPTH_8U, 1); /* 一時保存用 */
- m_label_buff = cvCreateImage(cvSize(m_img_input.width, m_img_input.height), IPL_DEPTH_16S, 1); /* ラベル結果保存用 */
- m_output_buff = cvCreateImage(cvSize(m_img_input.width, m_img_input.height), IPL_DEPTH_8U, 3); /* 出力用 */
+ m_image_buff.create(cv::Size(m_img_input.width, m_img_input.height), CV_8UC3);
+ m_hsv_buff.create(cv::Size(m_img_input.width, m_img_input.height), CV_8UC3);
+ m_convexHull_buff.create(cv::Size(m_img_input.width, m_img_input.height), CV_8UC3);
+ m_skin_buff.create(cv::Size(m_img_input.width, m_img_input.height), CV_8UC1);
+ m_temp_buff.create(cv::Size(m_img_input.width, m_img_input.height), CV_8UC1);
+ m_label_buff.create(cv::Size(m_img_input.width, m_img_input.height), CV_16SC1);
+ m_output_buff.create(cv::Size(m_img_input.width, m_img_input.height), CV_8UC3);
+
+
+
/* InPortの映像の取得 */
- memcpy(m_image_buff->imageData,(void *)&(m_img_input.pixels[0]),m_img_input.pixels.length());
-
+ memcpy(m_image_buff.data,(void *)&(m_img_input.pixels[0]),m_img_input.pixels.length());
/* 肌色を抽出する */
extractSkinColor();
-
/* 欠損領域を補間する */
interpolate();
-
+
/* ラベリングを行う */
Label *labeling = createLabeling();
+ cv::Mat pointMatrix; /* 手領域用行列 */
+ cv::Mat hullMatrix; /* ConvexHull用行列 */
+
exec( labeling, m_skin_buff, m_label_buff, true, IGNORE_SIZE );
-
+
if(getNumOfResultRegions( labeling ) > 0 )
{
/* IGNORE_SIZEよりも大きな領域があった場合 */
int handarea; /* 手領域の面積 */
int hullarea; /* ConvexHull内の面積 */
int hullcount; /* ConvexHullの頂点の数 */
- CvPoint *handpoint; /* 手領域内の点の座標配列 */
- int *hull; /* ConvexHullの頂点のhandpointにおけるindex番号 */
- CvMat pointMatrix; /* 手領域用行列 */
- CvMat hullMatrix; /* ConvexHull用行列 */
+ //std::vector<cv::Point> handpoint; /* 手領域内の点の座標配列 */
+ //std::vector<int> hull; /* ConvexHullの頂点のhandpointにおけるindex番号 */
+
/* 最大領域(手領域)の抽出を行う */
handarea = pickupMaxArea();
/* ConvexHullを生成する */
- createConvexHull( handarea, &handpoint, &hull, &pointMatrix, &hullMatrix );
+ createConvexHull( handarea, pointMatrix, hullMatrix );
+
+ //hullcount = hullMatrix.cols;
+ hullcount = hullMatrix.rows;
- hullcount = hullMatrix.cols;
-
/* ConvexHullを描画する */
- drawConvexHull( handpoint, hull, hullcount );
-
+ drawConvexHull(pointMatrix, hullMatrix, hullcount);
+
/* ConvexHull内の面積を求める */
- hullarea = calcConvexHullArea( handpoint,hull, hullcount );
-
+ hullarea = calcConvexHullArea(pointMatrix, hullMatrix, hullcount);
+
/* ジャンケンの判定を行う */
decide( handarea, hullarea );
/* メモリを解放する */
- free( handpoint );
- free( hull );
+
+
} else {
/* 画像を初期化する */
- cvSetZero( m_convexHull_buff );
+ m_convexHull_buff = cv::Mat::zeros(cv::Size(m_img_input.width, m_img_input.height), CV_8UC3);
}
releaseLabeling( labeling );
+
- if ( m_skin_buff->origin == 0 ) {
- /* 左上が原点の場合 */
- cvFlip( m_skin_buff, m_skin_buff, 0 );
- }
- if ( m_convexHull_buff->origin == 0 ) {
- /* 左上が原点の場合 */
- cvFlip( m_convexHull_buff, m_convexHull_buff, 0 );
- }
+ /* 左上が原点の場合 */
+ //cv::flip( m_skin_buff, m_skin_buff, 0 );
+
+
+ /* 左上が原点の場合 */
+ //cv::flip( m_convexHull_buff, m_convexHull_buff, 0 );
+
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(m_skin_buff);
+ tmp.push_back(m_skin_buff);
+ tmp.push_back(m_skin_buff);
+ cv::Mat test_mat = cv::Mat(cv::Size(m_img_input.width, m_img_input.height), CV_8UC3);
+ cv::merge(tmp, test_mat);
+
/* 画像データのサイズ取得 */
- int len = (m_output_buff->nChannels * m_output_buff->width * m_output_buff->height);
-
+ int len = (m_convexHull_buff.channels() * m_convexHull_buff.size().width * m_convexHull_buff.size().height);
+
m_img_output.pixels.length(len);
/* 該当のイメージをMemCopyする */
- memcpy((void *)&(m_img_output.pixels[0]), m_convexHull_buff->imageData, len);
+ memcpy((void *)&(m_img_output.pixels[0]), m_convexHull_buff.data, len);
+
/* 反転した画像データをOutPortから出力する */
- m_img_output.width = m_image_buff->width;
- m_img_output.height = m_image_buff->height;
-
+ m_img_output.width = m_image_buff.size().width;
+ m_img_output.height = m_image_buff.size().height;
+
m_img_outputOut.write();
- cvReleaseImage(&m_image_buff);
- cvReleaseImage(&m_hsv_buff);
- cvReleaseImage(&m_convexHull_buff);
- cvReleaseImage(&m_skin_buff);
- cvReleaseImage(&m_temp_buff);
- cvReleaseImage(&m_label_buff);
- cvReleaseImage(&m_output_buff);
+
}
@@ -345,20 +355,20 @@
//
void RockPaperScissors::extractSkinColor( void )
{
- CvScalar color; /* HSV表色系で表した色 */
+ cv::Vec3b color; /* HSV表色系で表した色 */
unsigned char h; /* H成分 */
unsigned char s; /* S成分 */
unsigned char v; /* V成分 */
/* BGRからHSVに変換する */
- cvCvtColor( m_image_buff, m_hsv_buff, CV_BGR2HSV );
+ cv::cvtColor(m_image_buff, m_hsv_buff, cv::COLOR_BGR2HSV);
/* 肌色抽出 */
- for( int x = 0; x < m_skin_buff->width; x++ )
+ for (int x = 0; x < m_hsv_buff.size().width; x++)
{
- for( int y = 0 ; y < m_skin_buff->height; y++ )
+ for (int y = 0; y < m_hsv_buff.size().height; y++)
{
- color = cvGet2D( m_hsv_buff, y, x );
+ color = m_hsv_buff.at<cv::Vec3b>(cv::Point(x, y));
h = color.val[0];
s = color.val[1];
v = color.val[2];
@@ -368,9 +378,11 @@
v <= VMAX && v >= VMIN )
{
/* 肌色の場合 */
- cvSetReal2D( m_skin_buff, y, x, 255 );
+ m_skin_buff.at<uchar>(cv::Point(x, y)) = 255;
+ //cvSetReal2D( m_skin_buff, y, x, 255 );
} else {
- cvSetReal2D( m_skin_buff, y, x, 0 );
+ m_skin_buff.at<uchar>(cv::Point(x, y)) = 0;
+ //cvSetReal2D( m_skin_buff, y, x, 0 );
}
}
}
@@ -382,10 +394,10 @@
void RockPaperScissors::interpolate( void )
{
/* 膨張をITERATIONS回行う */
- cvDilate( m_skin_buff, m_temp_buff, NULL, m_iterations );
+ cv::dilate( m_skin_buff, m_temp_buff, cv::Mat(), cv::Point(-1, -1), m_iterations );
/* 収縮をITERATIONS回行う */
- cvErode( m_temp_buff, m_skin_buff, NULL, m_iterations );
+ cv::erode( m_temp_buff, m_skin_buff, cv::Mat(), cv::Point(-1, -1), m_iterations );
}
//
@@ -398,18 +410,19 @@
{
int handarea = 0; /* 手領域の面積 */
- for(int x = 0; x < m_skin_buff->width; x++ )
+ for (int x = 0; x < m_label_buff.size().width; x++)
{
- for( int y=0; y < m_skin_buff->height; y++ )
+ for (int y = 0; y < m_label_buff.size().height; y++)
{
- if( cvGetReal2D( m_label_buff, y, x ) == 1 )
+ if (m_label_buff.at<short>(cv::Point(x, y)) == 1)
{
/* 最大領域だった場合 */
handarea++;
- cvSet2D( m_convexHull_buff, y, x, CV_RGB( 255, 255, 255 ) );
+ m_convexHull_buff.at<cv::Vec3b>(cv::Point(x, y)) = cv::Vec3b(255, 255, 255);
+
} else {
- cvSetReal2D( m_skin_buff, y, x, 0 );
- cvSet2D( m_convexHull_buff, y, x, CV_RGB( 0, 0, 0 ) );
+ m_skin_buff.at<uchar>(cv::Point(x, y)) = 0;
+ m_convexHull_buff.at< cv::Vec3b>(cv::Point(x, y)) = cv::Vec3b(0, 0, 0);
}
}
}
@@ -426,32 +439,41 @@
// pointMatrix : 手領域用行列へのポインタ
// hullMatrix : ConvexHull用行列へのポインタ
//
-void RockPaperScissors::createConvexHull( int handarea, CvPoint **handpoint, int **hull,
- CvMat *pointMatrix, CvMat *hullMatrix )
+void RockPaperScissors::createConvexHull(int handarea, cv::Mat &pointMatrix, cv::Mat &hullMatrix )
{
int i=0;
/* ConvexHullを計算するために必要な行列を生成する */
- *handpoint=( CvPoint * )malloc( sizeof( CvPoint ) * handarea );
- *hull = ( int * )malloc( sizeof( int ) * handarea );
- *pointMatrix = cvMat( 1, handarea, CV_32SC2, *handpoint );
- *hullMatrix = cvMat( 1, handarea, CV_32SC1, *hull );
+
+ //*handpoint=( cv::Point * )malloc( sizeof( CvPoint ) * handarea );
+ //*hull = ( int * )malloc( sizeof( int ) * handarea );
+
+ pointMatrix.create(cv::Point(handarea, 1), CV_32SC2);
+ hullMatrix.create(cv::Point(handarea, 1), CV_32SC1);
+
- for( int x = 0; x < m_skin_buff->width; x++ )
+
+ for( int x = 0; x < m_skin_buff.size().width; x++ )
{
- for( int y = 0; y < m_skin_buff->height; y++ )
+ for (int y = 0; y < m_skin_buff.size().height; y++)
{
- if( cvGetReal2D( m_skin_buff, y, x ) == 255 )
+ if (m_skin_buff.at<uchar>(cv::Point(x,y)) == 255)
{
- ( *handpoint )[i].x = x;
- ( *handpoint )[i].y = y;
+ pointMatrix.at<cv::Vec2i>(i) = cv::Vec2i(x, y);
+
+ //hull[i] = 0;
i++;
}
}
}
+
+ //pointMatrix.copyTo(handpoint);
+ //hullMatrix.copyTo(hull);
+
+
/* ConvexHullを生成する */
- cvConvexHull2( pointMatrix, hullMatrix, CV_CLOCKWISE, 0 );
+ cv::convexHull( pointMatrix, hullMatrix, CV_CLOCKWISE, 0 );
}
//
@@ -462,13 +484,17 @@
// hull : ConvexHullの頂点のhandpointにおけるindex番号
// hullcount : ConvexHullの頂点の数
//
-void RockPaperScissors::drawConvexHull( CvPoint *handpoint, int *hull, int hullcount )
+void RockPaperScissors::drawConvexHull(cv::Mat &handpoint, cv::Mat &hull, int hullcount)
{
- CvPoint pt0 = handpoint[hull[hullcount-1]];
+
+ cv::Point pt0 = handpoint.at<cv::Vec2i>(hull.at<int>(hullcount - 1));
+
for( int i = 0; i < hullcount; i++ )
{
- CvPoint pt = handpoint[hull[i]];
- cvLine( m_convexHull_buff, pt0, pt, CV_RGB( 0, 255, 0 ) );
+
+ cv::Point pt = handpoint.at<cv::Vec2i>(hull.at<int>(i));
+
+ cv::line( m_convexHull_buff, pt0, pt, CV_RGB( 0, 255, 0 ) );
pt0 = pt;
}
}
@@ -484,31 +510,38 @@
// 戻り値:
// ConvexHull内の面積
//
-int RockPaperScissors::calcConvexHullArea( CvPoint *handpoint, int *hull, int hullcount )
+int RockPaperScissors::calcConvexHullArea(cv::Mat &handpoint, cv::Mat &hull, int hullcount)
{
/* ConvexHullの頂点からなる行列を生成 */
- CvPoint *hullpoint = ( CvPoint * )malloc( sizeof( CvPoint ) * hullcount );
+ //cv::Point *hullpoint = ( cv::Point * )malloc( sizeof( CvPoint ) * hullcount );
+ //std::vector<cv::Point> hullpoint;
- CvMat hMatrix = cvMat( 1, hullcount, CV_32SC2, hullpoint );
+
+ cv::Mat hMatrix;
+ hMatrix.create(cv::Point(1, hullcount), CV_32SC2);
+
for( int i = 0; i < hullcount; i++ )
{
- hullpoint[i]=handpoint[hull[i]];
+ hMatrix.at<cv::Vec2i>(i) = handpoint.at<cv::Vec2i>(hull.at<int>(i));
}
+
+ //hMatrix.copyTo(hullpoint);
+
/* ConvexHull内の点の数を数える */
int hullarea = 0;
- for( int x = 0; x < m_convexHull_buff->width; x++ )
+ for( int x = 0; x < m_convexHull_buff.size().width; x++ )
{
- for( int y = 0;y < m_convexHull_buff->height; y++ )
+ for (int y = 0; y < m_convexHull_buff.size().height; y++)
{
- if( cvPointPolygonTest( &hMatrix, cvPoint2D32f( x, y ), 0 ) > 0)
+ if( cv::pointPolygonTest( hMatrix, cv::Point2f( x, y ), 0 ) > 0)
{
hullarea++;
}
}
}
- free( hullpoint );
+
return hullarea;
}
@@ -527,7 +560,7 @@
string judge;
ratio=handarea / ( double )hullarea;
-
+
if( ratio >= m_rock_min && ratio <= m_rock_max ) {
judge = "Rock";
}
Modified: trunk/ImageProcessing/opencv/components/Rotate/include/Rotate/Rotate.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Rotate/include/Rotate/Rotate.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Rotate/include/Rotate/Rotate.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用インクルードファイルのインクルード */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -288,11 +286,11 @@
int m_in_height; /* 入力イメージのHeight */
int m_in_width; /* 入力イメージのWidth */
- IplImage* m_image_buff; /* Original Image */
+ cv::Mat m_image_buff; /* Original Image */
- IplImage* m_image_dest; /* 結果出力用IplImage */
+ cv::Mat m_image_dest; /* 結果出力用IplImage */
- CvMat *m_transformMatrix;
+ cv::Mat m_transformMatrix;
};
Modified: trunk/ImageProcessing/opencv/components/Rotate/src/Rotate.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Rotate/src/Rotate.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Rotate/src/Rotate.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -117,7 +117,7 @@
m_in_width = 0;
/* 行列を生成する */
- m_transformMatrix = cvCreateMat( 2, 3, CV_32FC1);
+ m_transformMatrix.create( 2, 3, CV_32FC1);
return RTC::RTC_OK;
}
@@ -125,13 +125,22 @@
RTC::ReturnCode_t Rotate::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
- cvReleaseMat(&m_transformMatrix);
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
+ }
+ if (!m_image_dest.empty())
+ {
+ m_image_dest.release();
+ }
+ if (!m_transformMatrix.empty())
+ {
+ m_transformMatrix.release();
+ }
+
+
return RTC::RTC_OK;
}
@@ -153,39 +162,36 @@
m_in_height = m_image_orig.height;
m_in_width = m_image_orig.width;
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
/* サイズ変換のためTempメモリーを用意する */
- m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_image_dest = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
+ m_image_buff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_dest.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+
}
- memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
+ memcpy(m_image_buff.data,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
/* Anternative process */
/* 回転中心 */
- CvPoint2D32f center = cvPoint2D32f( m_image_buff->width / 2.0, m_image_buff->height / 2.0);
+ CvPoint2D32f center = cvPoint2D32f(m_image_buff.size().width / 2.0, m_image_buff.size().height / 2.0);
/* 変換行列を求める */
- cv2DRotationMatrix( center, m_dbRotate, m_dbScale, m_transformMatrix);
+ m_transformMatrix = cv::getRotationMatrix2D(center, m_dbRotate, m_dbScale);
/* 画像の拡大、縮小、回転を行う */
- cvWarpAffine( m_image_buff, m_image_dest, m_transformMatrix, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
+ cv::warpAffine(m_image_buff, m_image_dest, m_transformMatrix, m_image_dest.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar::all(255));
/* Common process */
/* 画像データのサイズ取得 */
- int len = m_image_dest->nChannels * m_image_dest->width * m_image_dest->height;
+ int len = m_image_dest.channels() * m_image_dest.size().width * m_image_dest.size().height;
/* 画面のサイズ情報を入れる */
m_image_output.pixels.length(len);
- m_image_output.width = m_image_dest->width;
- m_image_output.height = m_image_dest->height;
+ m_image_output.width = m_image_dest.size().width;
+ m_image_output.height = m_image_dest.size().height;
/* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_output.pixels[0]), m_image_dest->imageData,len);
+ memcpy((void *)&(m_image_output.pixels[0]), m_image_dest.data,len);
m_image_outputOut.write();
}
Modified: trunk/ImageProcessing/opencv/components/Scale/include/Scale/Scale.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Scale/include/Scale/Scale.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Scale/include/Scale/Scale.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用インクルードファイルのインクルード */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -285,8 +283,8 @@
// <rtc-template block="private_operation">
// </rtc-template>
- IplImage* m_image_buff; /* Original Image */
- IplImage* m_image_dest; /* 結果出力用IplImage */
+ cv::Mat m_image_buff; /* Original Image */
+ cv::Mat m_image_dest; /* 結果出力用IplImage */
double m_currentScaleX; /* 現在のX方向拡大比率 */
double m_currentScaleY; /* 現在のY方向拡大比率 */
Modified: trunk/ImageProcessing/opencv/components/Scale/src/Scale.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Scale/src/Scale.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Scale/src/Scale.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -127,13 +127,14 @@
RTC::ReturnCode_t Scale::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL)
+
+ if (!m_image_buff.empty())
{
- cvReleaseImage(&m_image_buff);
+ m_image_buff.release();
}
- if(m_image_dest != NULL)
+ if (!m_image_dest.empty())
{
- cvReleaseImage(&m_image_dest);
+ m_image_dest.release();
}
return RTC::RTC_OK;
}
@@ -158,18 +159,15 @@
m_in_height = m_image_orig.height;
m_in_width = m_image_orig.width;
- if(m_image_buff != NULL)
- {
- cvReleaseImage(&m_image_buff);
- }
- m_image_buff = cvCreateImage(cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3);
+
+ m_image_buff.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
}
/* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_buff->imageData, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
+ memcpy(m_image_buff.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
/* 拡大比率が更新されたら出力イメージ用メモリを再確保する */
- if(m_image_dest == NULL || m_currentScaleX != m_scale_x || m_currentScaleY != m_scale_y)
+ if(m_image_dest.empty() || m_currentScaleX != m_scale_x || m_currentScaleY != m_scale_y)
{
m_currentScaleX = m_scale_x;
m_currentScaleY = m_scale_y;
@@ -179,25 +177,23 @@
(int)(m_in_height * m_currentScaleY));
/* 既存のメモリを解放する */
- if(m_image_dest != NULL)
- cvReleaseImage(&m_image_dest);
- m_image_dest = cvCreateImage(cvSize((int)(m_in_width * m_currentScaleX),
- (int)(m_in_height * m_currentScaleY)), IPL_DEPTH_8U, 3);
+ m_image_dest.create(cv::Size(m_in_width * m_currentScaleX, m_in_height * m_currentScaleY), CV_8UC3);
+
}
/* 画像の大きさを変換する */
- cvResize( m_image_buff, m_image_dest, CV_INTER_LINEAR );
+ cv::resize(m_image_buff, m_image_dest, m_image_dest.size(), 0, 0, cv::INTER_LINEAR);
/* 画像データのサイズ取得 */
- int len = m_image_dest->nChannels * m_image_dest->width * m_image_dest->height;
+ int len = m_image_dest.channels() * m_image_dest.size().width * m_image_dest.size().height;
m_image_output.pixels.length(len);
/* 画面のサイズ情報を入れる */
- m_image_output.width = m_image_dest->width;
- m_image_output.height = m_image_dest->height;
+ m_image_output.width = m_image_dest.size().width;
+ m_image_output.height = m_image_dest.size().height;
/* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_output.pixels[0]), m_image_dest->imageData,len);
+ memcpy((void *)&(m_image_output.pixels[0]), m_image_dest.data, len);
m_image_outputOut.write();
}
Modified: trunk/ImageProcessing/opencv/components/Sepia/include/Sepia/Sepia.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Sepia/include/Sepia/Sepia.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Sepia/include/Sepia/Sepia.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用インクルードファイルのインクルード */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -289,15 +287,15 @@
int m_in_height; /* 入力イメージのHeight */
int m_in_width; /* 入力イメージのWidth */
- IplImage* m_image_buff; /* Original Image */
+ cv::Mat m_image_buff; /* Original Image */
- IplImage* m_hsvImage; /* HSV画像用IplImage */
- IplImage* m_hueImage; /* 色相(H)情報用IplImage */
- IplImage* m_saturationImage; /* 彩度(S)情報用IplImage */
- IplImage* m_valueImage; /* 明度(V)情報用IplImage */
+ cv::Mat m_hsvImage; /* HSV画像用IplImage */
+ cv::Mat m_hueImage; /* 色相(H)情報用IplImage */
+ cv::Mat m_saturationImage; /* 彩度(S)情報用IplImage */
+ cv::Mat m_valueImage; /* 明度(V)情報用IplImage */
- IplImage* m_mergeImage; /* マージ用IplImage */
- IplImage* m_destinationImage; /* 結果出力用IplImage */
+ cv::Mat m_mergeImage; /* マージ用IplImage */
+ cv::Mat m_destinationImage; /* 結果出力用IplImage */
};
Modified: trunk/ImageProcessing/opencv/components/Sepia/src/Sepia.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Sepia/src/Sepia.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Sepia/src/Sepia.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -109,18 +109,8 @@
RTC::ReturnCode_t Sepia::onActivated(RTC::UniqueId ec_id)
{
- /* イメージ用メモリの確保 */
- m_image_buff = NULL;
- m_hsvImage = NULL;
- m_hueImage = NULL;
- m_saturationImage = NULL;
- m_valueImage = NULL;
-
- m_mergeImage = NULL;
- m_destinationImage = NULL;
-
m_in_height = 0;
m_in_width = 0;
@@ -131,21 +121,37 @@
RTC::ReturnCode_t Sepia::onDeactivated(RTC::UniqueId ec_id)
{
/* イメージ用メモリの解放 */
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_hsvImage != NULL)
- cvReleaseImage(&m_hsvImage);
- if(m_hueImage != NULL)
- cvReleaseImage(&m_hueImage);
- if(m_saturationImage != NULL)
- cvReleaseImage(&m_saturationImage);
- if(m_valueImage != NULL)
- cvReleaseImage(&m_valueImage);
- if(m_mergeImage != NULL)
- cvReleaseImage(&m_mergeImage);
- if(m_destinationImage != NULL)
- cvReleaseImage(&m_destinationImage);
+
+ if (!m_image_buff.empty())
+ {
+ m_image_buff.release();
+ }
+ if (!m_hsvImage.empty())
+ {
+ m_hsvImage.release();
+ }
+ if (!m_hueImage.empty())
+ {
+ m_hueImage.release();
+ }
+ if (!m_saturationImage.empty())
+ {
+ m_saturationImage.release();
+ }
+ if (!m_valueImage.empty())
+ {
+ m_valueImage.release();
+ }
+ if (!m_mergeImage.empty())
+ {
+ m_mergeImage.release();
+ }
+ if (!m_destinationImage.empty())
+ {
+ m_destinationImage.release();
+ }
+
return RTC::RTC_OK;
}
@@ -167,61 +173,60 @@
m_in_height = m_image_orig.height;
m_in_width = m_image_orig.width;
- if(m_image_buff != NULL)
- cvReleaseImage(&m_image_buff);
- if(m_hsvImage != NULL)
- cvReleaseImage(&m_hsvImage);
- if(m_hueImage != NULL)
- cvReleaseImage(&m_hueImage);
- if(m_saturationImage != NULL)
- cvReleaseImage(&m_saturationImage);
- if(m_valueImage != NULL)
- cvReleaseImage(&m_valueImage);
- if(m_mergeImage != NULL)
- cvReleaseImage(&m_mergeImage);
- if(m_destinationImage != NULL)
- cvReleaseImage(&m_destinationImage);
- m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_hsvImage = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_hueImage = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
- m_saturationImage = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
- m_valueImage = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
- m_mergeImage = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_destinationImage = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
+
+
+
+ m_image_buff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_hsvImage.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_hueImage.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ m_saturationImage.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ m_valueImage.create(cv::Size(m_in_width, m_in_height), CV_8UC1);
+ m_mergeImage.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_destinationImage.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
}
/* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
+ memcpy(m_image_buff.data,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
// Anternative actions
/* BGRからHSVに変換する */
- cvCvtColor(m_image_buff, m_hsvImage, CV_BGR2HSV);
+ cv::cvtColor(m_image_buff, m_hsvImage, CV_BGR2HSV);
/* HSV画像をH、S、V画像に分ける */
- cvSplit(m_hsvImage, m_hueImage, m_saturationImage, m_valueImage, NULL);
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(m_hueImage);
+ tmp.push_back(m_saturationImage);
+ tmp.push_back(m_valueImage);
+ cv::split(m_hsvImage, tmp);
/* HとSの値を変更する */
- cvSet(m_hueImage, cvScalar( m_nHue ), NULL);
- cvSet(m_saturationImage, cvScalar( m_nSaturation ), NULL);
+ m_hueImage.setTo(cv::Scalar(m_nHue));
+ m_saturationImage.setTo(cv::Scalar(m_nSaturation));
+
+
/* 3チャンネルを結合 */
- cvMerge(m_hueImage, m_saturationImage, m_valueImage, NULL, m_mergeImage);
+ tmp.clear();
+ tmp.push_back(m_hueImage);
+ tmp.push_back(m_saturationImage);
+ tmp.push_back(m_valueImage);
+ cv::merge(tmp, m_mergeImage);
/* HSVからBGRに変換する */
- cvCvtColor(m_mergeImage, m_destinationImage, CV_HSV2BGR);
+ cv::cvtColor(m_mergeImage, m_destinationImage, CV_HSV2BGR);
/* 画像データのサイズ取得 */
- int len = m_destinationImage->nChannels * m_destinationImage->width * m_destinationImage->height;
+ int len = m_destinationImage.channels() * m_destinationImage.size().width * m_destinationImage.size().height;
/* 画面のサイズ情報を入れる */
m_image_sepia.pixels.length(len);
- m_image_sepia.width = m_destinationImage->width;
- m_image_sepia.height = m_destinationImage->height;
+ m_image_sepia.width = m_destinationImage.size().width;
+ m_image_sepia.height = m_destinationImage.size().height;
/* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_sepia.pixels[0]), m_destinationImage->imageData,len);
+ memcpy((void *)&(m_image_sepia.pixels[0]), m_destinationImage.data,len);
m_image_sepiaOut.write();
}
Modified: trunk/ImageProcessing/opencv/components/SubStractCaptureImage/include/SubStractCaptureImage/SubStractCaptureImage.h
===================================================================
--- trunk/ImageProcessing/opencv/components/SubStractCaptureImage/include/SubStractCaptureImage/SubStractCaptureImage.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/SubStractCaptureImage/include/SubStractCaptureImage/SubStractCaptureImage.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -19,9 +19,8 @@
#include <rtm/idl/ExtendedDataTypesSkel.h>
#include <rtm/idl/InterfaceDataTypesSkel.h>
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
#define MASK_THRESHOLD 1 /* backgroundMaskImageやstillObjectMaskImageを生成するための閾値 */
#define THRESHOLD_MAX_VALUE 255 /* 2値化の際に使用する最大値 */
@@ -304,34 +303,34 @@
// </rtc-template>
- IplImage* inputImage;
- IplImage* backgroundAverageImage; /* 背景の平均値保存用IplImage */
- IplImage* backgroundThresholdImage; /* 背景の閾値保存用IplImage */
- IplImage* stillObjectAverageImage; /* 静止物体の平均値保存用IplImage */
- IplImage* stillObjectThresholdImage; /* 静止物体の閾値保存用IplImage */
- IplImage* stillObjectCounterImage; /* 静止物体のカウンタ用IplImage */
- IplImage* backgroundDifferenceImage; /* 背景差分画像用IplImage */
- IplImage* stillObjectDifferenceImage; /* 静止物体差分画像用IplIMage */
- IplImage* thresholdImage32; /* 32bitの閾値画像用IplImage */
- IplImage* thresholdImage; /* 閾値画像用IplImage */
- IplImage* resultImage; /* 結果画像用IplImage */
- IplImage* backgroundMaskImage; /* 背景マスク画像用IplImage */
- IplImage* foregroundMaskImage; /* 前景マスク用IplImage */
- IplImage* stillObjectMaskImage; /* 静止物体マスク用IplImage */
- IplImage* movingObjectMask; /* 動物体マスク用IplImage */
- IplImage* backgroundCopyMaskImage; /* 背景にコピーする際に使用するマスク用IplImage */
- IplImage* tmpMaskImage; /* テンポラリ用IplImage */
- IplImage* tmp2MaskImage; /* テンポラリ用IplImage(その2) */
- IplImage* frameImage32; /* 32bitのキャプチャした画像用IplImage */
- IplImage* backgroundImage; /* 背景画像用IplImage */
- IplImage* stillObjectImage; /* 静止物体画像用IplImage */
- IplImage* outputImage;
+ cv::Mat inputImage;
+ cv::Mat backgroundAverageImage; /* 背景の平均値保存用IplImage */
+ cv::Mat backgroundThresholdImage; /* 背景の閾値保存用IplImage */
+ cv::Mat stillObjectAverageImage; /* 静止物体の平均値保存用IplImage */
+ cv::Mat stillObjectThresholdImage; /* 静止物体の閾値保存用IplImage */
+ cv::Mat stillObjectCounterImage; /* 静止物体のカウンタ用IplImage */
+ cv::Mat backgroundDifferenceImage; /* 背景差分画像用IplImage */
+ cv::Mat stillObjectDifferenceImage; /* 静止物体差分画像用IplIMage */
+ cv::Mat thresholdImage32; /* 32bitの閾値画像用IplImage */
+ cv::Mat thresholdImage; /* 閾値画像用IplImage */
+ cv::Mat resultImage; /* 結果画像用IplImage */
+ cv::Mat backgroundMaskImage; /* 背景マスク画像用IplImage */
+ cv::Mat foregroundMaskImage; /* 前景マスク用IplImage */
+ cv::Mat stillObjectMaskImage; /* 静止物体マスク用IplImage */
+ cv::Mat movingObjectMask; /* 動物体マスク用IplImage */
+ cv::Mat backgroundCopyMaskImage; /* 背景にコピーする際に使用するマスク用IplImage */
+ cv::Mat tmpMaskImage; /* テンポラリ用IplImage */
+ cv::Mat tmp2MaskImage; /* テンポラリ用IplImage(その2) */
+ cv::Mat frameImage32; /* 32bitのキャプチャした画像用IplImage */
+ cv::Mat backgroundImage; /* 背景画像用IplImage */
+ cv::Mat stillObjectImage; /* 静止物体画像用IplImage */
+ cv::Mat outputImage;
- IplImage* foreGroundMaskBuff;
- IplImage* stillObjectMaskBuff;
- IplImage* backGroundBuff;
- IplImage* stillObjectImageBuff;
- IplImage* stillObjectCounterBuff;
+ cv::Mat foreGroundMaskBuff;
+ cv::Mat stillObjectMaskBuff;
+ cv::Mat backGroundBuff;
+ cv::Mat stillObjectImageBuff;
+ cv::Mat stillObjectCounterBuff;
int key; /* キー入力用の変数 */
};
Modified: trunk/ImageProcessing/opencv/components/SubStractCaptureImage/src/SubStractCaptureImage.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/SubStractCaptureImage/src/SubStractCaptureImage.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/SubStractCaptureImage/src/SubStractCaptureImage.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -34,13 +34,15 @@
int g_temp_h = 0;
int SubStractCaptureImage_count = 0;
-void showFlipImage( char *windowName, IplImage *image ) {
+/*
+void showFlipImage(char *windowName, cv::Mat image) {
if ( image->origin == 0 ) {
//cvFlip( image, image, 0 );
cvShowImage( windowName, image );
//cvFlip( image, image, 0 );
}
}
+*/
/*!
* @brief constructor
@@ -125,34 +127,7 @@
g_temp_h = 0;
SubStractCaptureImage_count = 0;
- inputImage = NULL;
- backgroundAverageImage = NULL;
- backgroundThresholdImage = NULL;
- stillObjectAverageImage = NULL;
- stillObjectThresholdImage = NULL;
- stillObjectCounterImage = NULL;
- backgroundDifferenceImage = NULL;
- stillObjectDifferenceImage = NULL;
- thresholdImage32 = NULL;
- thresholdImage = NULL;
- resultImage = NULL;
- backgroundMaskImage = NULL;
- foregroundMaskImage = NULL;
- stillObjectMaskImage = NULL;
- movingObjectMask = NULL;
- backgroundCopyMaskImage = NULL;
- tmpMaskImage = NULL;
- tmp2MaskImage = NULL;
- frameImage32 = NULL;
- backgroundImage = NULL;
- stillObjectImage = NULL;
- outputImage = NULL;
- foreGroundMaskBuff = NULL;
- stillObjectMaskBuff = NULL;
- backGroundBuff = NULL;
- stillObjectImageBuff = NULL;
- stillObjectCounterBuff = NULL;
return RTC::RTC_OK;
}
@@ -160,89 +135,88 @@
RTC::ReturnCode_t SubStractCaptureImage::onDeactivated(RTC::UniqueId ec_id)
{
- if(inputImage != NULL){
- cvReleaseImage( &inputImage );
+
+
+
+
+ if (!inputImage.empty())
+ {
+ inputImage.release();
}
- if(backgroundAverageImage != NULL){
- cvReleaseImage( &backgroundAverageImage );
+ if (!backgroundAverageImage.empty())
+ {
+ backgroundAverageImage.release();
}
- if(backgroundThresholdImage != NULL){
- cvReleaseImage( &backgroundThresholdImage);
+ if (!backgroundThresholdImage.empty())
+ {
+ backgroundThresholdImage.release();
}
- if(stillObjectAverageImage != NULL){
- cvReleaseImage( &stillObjectAverageImage );
+ if (!stillObjectAverageImage.empty())
+ {
+ stillObjectAverageImage.release();
}
- if(stillObjectThresholdImage != NULL){
- cvReleaseImage( &stillObjectThresholdImage );
+ if (!stillObjectThresholdImage.empty())
+ {
+ stillObjectThresholdImage.release();
}
- if(stillObjectCounterImage != NULL){
- cvReleaseImage( &stillObjectCounterImage );
+ if (!backgroundDifferenceImage.empty())
+ {
+ backgroundDifferenceImage.release();
}
- if(backgroundDifferenceImage != NULL){
- cvReleaseImage( &backgroundDifferenceImage );
+ if (!stillObjectDifferenceImage.empty())
+ {
+ stillObjectDifferenceImage.release();
}
- if(stillObjectDifferenceImage != NULL){
- cvReleaseImage( &stillObjectDifferenceImage );
+ if (!backgroundCopyMaskImage.empty())
+ {
+ backgroundCopyMaskImage.release();
}
- if(thresholdImage32 != NULL){
- cvReleaseImage( &thresholdImage32 );
+ if (!tmpMaskImage.empty())
+ {
+ tmpMaskImage.release();
}
- if(thresholdImage != NULL){
- cvReleaseImage( &thresholdImage );
+ if (!tmp2MaskImage.empty())
+ {
+ tmp2MaskImage.release();
}
- if(resultImage != NULL){
- cvReleaseImage( &resultImage );
+ if (!frameImage32.empty())
+ {
+ frameImage32.release();
}
- if(backgroundMaskImage != NULL){
- cvReleaseImage( &backgroundMaskImage );
+ if (!backgroundImage.empty())
+ {
+ backgroundImage.release();
}
- if(foregroundMaskImage != NULL){
- cvReleaseImage( &foregroundMaskImage );
+ if (!stillObjectImage.empty())
+ {
+ stillObjectImage.release();
}
- if(stillObjectMaskImage != NULL){
- cvReleaseImage( &stillObjectMaskImage );
+ if (!outputImage.empty())
+ {
+ outputImage.release();
}
- if(movingObjectMask != NULL){
- cvReleaseImage( &movingObjectMask );
+ if (!foreGroundMaskBuff.empty())
+ {
+ foreGroundMaskBuff.release();
}
- if(backgroundCopyMaskImage != NULL){
- cvReleaseImage( &backgroundCopyMaskImage );
+ if (!stillObjectMaskBuff.empty())
+ {
+ stillObjectMaskBuff.release();
}
- if(tmpMaskImage != NULL){
- cvReleaseImage( &tmpMaskImage );
+ if (!backGroundBuff.empty())
+ {
+ backGroundBuff.release();
}
- if(tmp2MaskImage != NULL){
- cvReleaseImage( &tmp2MaskImage );
+ if (!stillObjectImageBuff.empty())
+ {
+ stillObjectImageBuff.release();
}
- if(frameImage32 != NULL){
- cvReleaseImage( &frameImage32 );
+ if (!stillObjectCounterBuff.empty())
+ {
+ stillObjectCounterBuff.release();
}
- if(backgroundImage != NULL){
- cvReleaseImage( &backgroundImage );
- }
- if(stillObjectImage != NULL){
- cvReleaseImage( &stillObjectImage );
- }
- if(outputImage != NULL){
- cvReleaseImage( &outputImage );
- }
- if(foreGroundMaskBuff != NULL){
- cvReleaseImage( &foreGroundMaskBuff);
- }
- if(stillObjectMaskBuff != NULL){
- cvReleaseImage( &stillObjectMaskBuff);
- }
- if(backGroundBuff != NULL){
- cvReleaseImage( &backGroundBuff);
- }
- if(stillObjectImageBuff != NULL){
- cvReleaseImage( &stillObjectImageBuff);
- }
- if(stillObjectCounterBuff != NULL){
- cvReleaseImage( &stillObjectCounterBuff);
- }
-
+
return RTC::RTC_OK;
}
@@ -256,47 +230,51 @@
if(g_temp_w != m_image_orig.width || g_temp_h != m_image_orig.height)
{
/* 画像サイズを保存 */
- imageSize = cvSize(m_image_orig.width, m_image_orig.height);
+ imageSize = cv::Size(m_image_orig.width, m_image_orig.height);
/* 画像を生成する */
- inputImage = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
- backgroundAverageImage = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 背景の平均値保存用IplImage */
- backgroundThresholdImage = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 背景の閾値保存用IplImage */
- stillObjectAverageImage = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 静止物体の平均値保存用IplImage */
- stillObjectThresholdImage = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 静止物体の閾値保存用IplImage */
- stillObjectCounterImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* 静止物体のカウンタ用IplImage */
- backgroundDifferenceImage = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 背景差分画像用IplImage */
- stillObjectDifferenceImage = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 静止物体差分画像用IplIMage */
- thresholdImage32 = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 32bitの閾値画像用IplImage */
- thresholdImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 3 ); /* 閾値画像用IplImage */
- resultImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* 結果画像用IplImage */
- backgroundMaskImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* 背景マスク画像用IplImage */
- foregroundMaskImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* 前景マスク用IplImage */
- stillObjectMaskImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* 静止物体マスク用IplImage */
- movingObjectMask = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* 動物体マスク用IplImage */
- backgroundCopyMaskImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* 背景にコピーする際に使用するマスク用IplImage */
- tmpMaskImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* テンポラリ用IplImage */
- tmp2MaskImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 1 ); /* テンポラリ用IplImage(その2) */
- frameImage32 = cvCreateImage( imageSize, IPL_DEPTH_32F, 3 ); /* 32bitのキャプチャした画像用IplImage */
- backgroundImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 3 ); /* 背景画像用IplImage */
- stillObjectImage = cvCreateImage( imageSize, IPL_DEPTH_8U, 3 );
- outputImage = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
- foreGroundMaskBuff = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
- stillObjectMaskBuff = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
- backGroundBuff = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
- stillObjectImageBuff = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
- stillObjectCounterBuff = cvCreateImage(imageSize, IPL_DEPTH_8U, 3);
+
+    inputImage.create(imageSize, CV_8UC3); /* 入力画像用 */
+    backgroundAverageImage.create(imageSize, CV_32FC3); /* 背景の平均値保存用 */
+    backgroundThresholdImage.create(imageSize, CV_32FC3); /* 背景の閾値保存用 */
+    stillObjectAverageImage.create(imageSize, CV_32FC3); /* 静止物体の平均値保存用 */
+    stillObjectThresholdImage.create(imageSize, CV_32FC3); /* 静止物体の閾値保存用 */
+    stillObjectCounterImage.create(imageSize, CV_8UC1); /* 静止物体のカウンタ用 */
+
+    backgroundDifferenceImage.create(imageSize, CV_32FC3); /* 背景差分画像用 */
+    stillObjectDifferenceImage.create(imageSize, CV_32FC3); /* 静止物体差分画像用 */
+    thresholdImage32.create(imageSize, CV_32FC3); /* 32bitの閾値画像用 */
+    thresholdImage.create(imageSize, CV_8UC3); /* 閾値画像用 */
+    resultImage.create(imageSize, CV_8UC1); /* 結果画像用 */
+    backgroundMaskImage.create(imageSize, CV_8UC1); /* 背景マスク画像用 */
+    foregroundMaskImage.create(imageSize, CV_8UC1); /* 前景マスク用 */
+    stillObjectMaskImage.create(imageSize, CV_8UC1); /* 静止物体マスク用 */
+    movingObjectMask.create(imageSize, CV_8UC1); /* 動物体マスク用 */
+    backgroundCopyMaskImage.create(imageSize, CV_8UC1); /* 背景にコピーする際に使用するマスク用 */
+    tmpMaskImage.create(imageSize, CV_8UC1); /* テンポラリ用 */
+    tmp2MaskImage.create(imageSize, CV_8UC1); /* テンポラリ用(その2) */
+    frameImage32.create(imageSize, CV_32FC3); /* 32bitのキャプチャ画像用 */
+ backgroundImage.create(imageSize, CV_8UC3);
+ stillObjectImage.create(imageSize, CV_8UC3);
+ outputImage.create(imageSize, CV_8UC3);
+ foreGroundMaskBuff.create(imageSize, CV_8UC3);
+ stillObjectMaskBuff.create(imageSize, CV_8UC3);
+ backGroundBuff.create(imageSize, CV_8UC3);
+ stillObjectImageBuff.create(imageSize, CV_8UC3);
+ stillObjectCounterBuff.create(imageSize, CV_8UC3);
- memcpy(inputImage->imageData,(void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
+ memcpy(inputImage.data,(void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
/* 初期化する */
- cvConvert( inputImage, backgroundAverageImage );
- cvSet( backgroundThresholdImage, cvScalarAll( BACKGROUND_INITIAL_THRESHOLD ) );
- cvSetZero( stillObjectAverageImage );
- cvSetZero( stillObjectThresholdImage );
- cvSetZero( stillObjectCounterImage );
+ inputImage.convertTo(backgroundAverageImage, CV_32F);
+
+ backgroundThresholdImage.setTo(cv::Scalar::all(BACKGROUND_INITIAL_THRESHOLD));
+ stillObjectAverageImage = cv::Mat::zeros(imageSize, CV_32FC3);
+ stillObjectThresholdImage = cv::Mat::zeros(imageSize, CV_32FC3);
+ stillObjectCounterImage = cv::Mat::zeros(imageSize, CV_8UC1);
+
g_temp_w = m_image_orig.width;
g_temp_h = m_image_orig.height;
@@ -310,37 +288,42 @@
if(g_temp_w == m_image_orig.width && g_temp_h == m_image_orig.height)
{
- memcpy(inputImage->imageData,(void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
+
+ memcpy(inputImage.data,(void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length());
/* float 32bitに変換する */
- cvConvert( inputImage, frameImage32 );
+
+ inputImage.convertTo(frameImage32, CV_32F);
+
// 背景との差 /////////////////////////////////////////////////////////
/* 現在の背景との差の絶対値を成分ごとに取る */
- cvAbsDiff( frameImage32, backgroundAverageImage, backgroundDifferenceImage );
-
+ cv::absdiff( frameImage32, backgroundAverageImage, backgroundDifferenceImage );
+
/* 閾値の値を引く */
- cvAddWeighted( backgroundDifferenceImage, 1.0, backgroundThresholdImage, -THRESHOLD_COEFFICIENT, 0.0, thresholdImage32 );
+ cv::addWeighted( backgroundDifferenceImage, 1.0, backgroundThresholdImage, -THRESHOLD_COEFFICIENT, 0.0, thresholdImage32 );
/* thresholdImage の要素が1つでも0以上だったら背景ではない */
- cvConvert( thresholdImage32, thresholdImage );
- cvCvtColor( thresholdImage, resultImage, CV_BGR2GRAY );
- cvThreshold( resultImage, backgroundMaskImage, MASK_THRESHOLD, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
+ thresholdImage32.convertTo(thresholdImage, CV_8U);
+ cv::cvtColor(thresholdImage, resultImage, cv::COLOR_BGR2GRAY);
+ cv::threshold( resultImage, backgroundMaskImage, MASK_THRESHOLD, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY_INV );
// 背景候補との差 /////////////////////////////////////////////////////
/* 現在の背景候補との差の絶対値を成分ごとに取る */
- cvAbsDiff( frameImage32, stillObjectAverageImage, stillObjectDifferenceImage );
+ cv::absdiff( frameImage32, stillObjectAverageImage, stillObjectDifferenceImage );
/* 閾値の値を引く */
- cvAddWeighted( stillObjectDifferenceImage, 1.0, stillObjectThresholdImage, -THRESHOLD_COEFFICIENT, 0.0, thresholdImage32 );
+ cv::addWeighted( stillObjectDifferenceImage, 1.0, stillObjectThresholdImage, -THRESHOLD_COEFFICIENT, 0.0, thresholdImage32 );
/* thresholdImage の要素が1つでも0以上だったら背景候補ではない */
- cvConvert( thresholdImage32, thresholdImage );
- cvCvtColor( thresholdImage, resultImage, CV_BGR2GRAY );
- cvThreshold( resultImage, stillObjectMaskImage, MASK_THRESHOLD, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
-
+ thresholdImage32.convertTo(thresholdImage, CV_8U);
+
+ cv::cvtColor(thresholdImage, resultImage, cv::COLOR_BGR2GRAY);
+
+ cv::threshold( resultImage, stillObjectMaskImage, MASK_THRESHOLD, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY_INV );
+
/* ここまでで、
* backgroundDifferenceImage, backgroundMaskImage
* stillObjectDifferenceImage, stillObjectMaskImage
@@ -350,92 +333,129 @@
// 各種情報を更新する /////////////////////////////////////////////////
/* 背景に同化する場合 (backgroundMaskImage=1の場合) */
- cvRunningAvg( frameImage32, backgroundAverageImage, BACKGROUND_ALPHA, backgroundMaskImage );
- cvRunningAvg( backgroundDifferenceImage, backgroundThresholdImage, BACKGROUND_ALPHA, backgroundMaskImage );
+
+
+    cv::accumulateWeighted(frameImage32, backgroundAverageImage, BACKGROUND_ALPHA, backgroundMaskImage);
+    cv::accumulateWeighted(backgroundDifferenceImage, backgroundThresholdImage, BACKGROUND_ALPHA, backgroundMaskImage);
+
/* 背景候補に同化する場合 (backgroundMaskImage=0 && stillObjectMaskImage=1) */
- cvNot( backgroundMaskImage, foregroundMaskImage );
- cvAnd( foregroundMaskImage, stillObjectMaskImage, tmpMaskImage ); /* */
+ cv::bitwise_not(backgroundMaskImage, foregroundMaskImage);
+ //foregroundMaskImage.convertTo(foregroundMaskImage, CV_32F);
+
+ cv::bitwise_and(foregroundMaskImage, stillObjectMaskImage, tmpMaskImage); /* */
- cvRunningAvg( frameImage32, stillObjectAverageImage, STILL_OBJECT_ALPHA, tmpMaskImage );
- cvRunningAvg( stillObjectDifferenceImage, stillObjectThresholdImage, STILL_OBJECT_ALPHA, tmpMaskImage );
+ //tmpMaskImage.convertTo(tmpMaskImage, CV_8U);
- /* 背景候補カウンタを増やす */
- cvAddS( stillObjectCounterImage, cvScalar( 1 ), stillObjectCounterImage, tmpMaskImage );
+ cv::accumulateWeighted(frameImage32, stillObjectAverageImage, STILL_OBJECT_ALPHA, tmpMaskImage);
+ cv::accumulateWeighted(stillObjectDifferenceImage, stillObjectThresholdImage, STILL_OBJECT_ALPHA, tmpMaskImage);
+ /* 背景候補カウンタを増やす */
+
+ //backgroundMaskImage.convertTo(stillObjectCounterImage, CV_8U);
+
+
+ cv::add(stillObjectCounterImage, cv::Scalar::all(1), stillObjectCounterImage, tmpMaskImage);
+
/* カウンタが閾値以上になったら、背景候補を背景として採用する */
- cvThreshold( stillObjectCounterImage, tmp2MaskImage, STILL_OBJECT_TO_BACKGROUND, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
- cvAnd( tmpMaskImage, tmp2MaskImage, backgroundCopyMaskImage );
- cvCopy( stillObjectAverageImage, backgroundAverageImage, backgroundCopyMaskImage );
- cvCopy( stillObjectThresholdImage, backgroundThresholdImage, backgroundCopyMaskImage );
+
+ cv::threshold(stillObjectCounterImage, tmp2MaskImage, STILL_OBJECT_TO_BACKGROUND, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY);
+
+ //cv::cvtColor(tmp2MaskImage, tmp2MaskImage, cv::COLOR_BGR2GRAY);
+
+
+
+ cv::bitwise_and(tmpMaskImage, tmp2MaskImage, backgroundCopyMaskImage);
+
+ //backgroundCopyMaskImage.convertTo(tmpMaskImage, CV_8U);
+ stillObjectAverageImage.copyTo(backgroundAverageImage, backgroundCopyMaskImage);
+
+ stillObjectThresholdImage.copyTo(backgroundThresholdImage, backgroundCopyMaskImage);
+
/* この backgroundCopyMaskImage は、後で背景候補を0に初期化する際に使用 */
- cvSet( stillObjectCounterImage, cvScalarAll( 0 ), backgroundCopyMaskImage );
-
+ stillObjectCounterImage.setTo(cv::Scalar::all(0), backgroundCopyMaskImage);
+
/* 背景候補でもなく、背景でもない場合 */
/* (foregroundMaskImage = 1 && stillObjectMaskImage = 0) */
- cvNot( stillObjectMaskImage, movingObjectMask );
+ //stillObjectMaskImage.convertTo(stillObjectMaskImage, CV_8U);
+
+ cv::bitwise_not(stillObjectMaskImage, movingObjectMask);
+
/* カウンタを減らす(短時,間ノイズ対応)
* これは、背景に分類されたピクセルに対しても行う。すなわち、
* movingObjectMask == 1 || backgroundMaskImage == 1
*/
- cvOr( backgroundMaskImage, movingObjectMask, tmpMaskImage );
- cvSubS( stillObjectCounterImage, cvScalarAll( NOT_STILL_DEC_STEP ), stillObjectCounterImage, tmpMaskImage );
-
+ cv::bitwise_or(backgroundMaskImage, movingObjectMask, tmpMaskImage);
+
+ cv::subtract(stillObjectCounterImage, cv::Scalar::all(NOT_STILL_DEC_STEP), stillObjectCounterImage, tmpMaskImage);
+
/* カウンタが0になったら背景候補を初期化する */
- cvNot( stillObjectCounterImage, tmp2MaskImage ); /* tmp2 = 1 なら初期化 */
+ cv::bitwise_not(stillObjectCounterImage, tmp2MaskImage); /* tmp2 = 1 なら初期化 */
/* 初期化する値の候補は2種類ある。
* (1)現在の画像で初期化 --- 背景でもなく背景候補でもない場合
* (2)登録なし状態で初期化 --- 背景もしくは背景候補をコピーした場合
* ここでは(1)で初期化しておく
*/
- cvOr( tmpMaskImage, backgroundCopyMaskImage, tmpMaskImage );
- cvAnd( tmpMaskImage, tmp2MaskImage, tmpMaskImage );
+ cv::bitwise_or(tmpMaskImage, backgroundCopyMaskImage, tmpMaskImage);
+
+ cv::bitwise_and(tmpMaskImage, tmp2MaskImage, tmpMaskImage);
- cvCopy( frameImage32, stillObjectAverageImage, tmpMaskImage );
- cvSet( stillObjectThresholdImage, cvScalarAll( STILL_OBJECT_INITIAL_THRESHOLD ), tmpMaskImage );
+ frameImage32.copyTo(stillObjectAverageImage, tmpMaskImage);
+ stillObjectThresholdImage.setTo(cv::Scalar::all(STILL_OBJECT_INITIAL_THRESHOLD), tmpMaskImage);
+
/* ノイズを除去する */
- cvSmooth( foregroundMaskImage, foregroundMaskImage, CV_MEDIAN );
+ cv::medianBlur(foregroundMaskImage, foregroundMaskImage, 7);
- cvConvert( backgroundAverageImage, backgroundImage );
- cvConvert( stillObjectAverageImage, stillObjectImage );
+    backgroundAverageImage.convertTo(backgroundImage, CV_8U);
+    stillObjectAverageImage.convertTo(stillObjectImage, CV_8U);
+
cvWaitKey( 1 );
/* 画像データのサイズ取得 */
double len;
- len = (outputImage->nChannels * outputImage->width * outputImage->height);
+ len = (outputImage.channels() * outputImage.size().width * outputImage.size().height);
m_image_out.pixels.length(len);
- memcpy((void *)&(m_image_out.pixels[0]), inputImage->imageData, len);
+ memcpy((void *)&(m_image_out.pixels[0]), inputImage.data, len);
m_image_out.width = m_image_orig.width;
m_image_out.height = m_image_orig.height;
m_image_outOut.write();
- cvMerge( foregroundMaskImage, foregroundMaskImage, foregroundMaskImage, NULL, foreGroundMaskBuff);
+ std::vector<cv::Mat> tmp;
+ tmp.push_back(foregroundMaskImage);
+ tmp.push_back(foregroundMaskImage);
+ tmp.push_back(foregroundMaskImage);
- len = (foreGroundMaskBuff->nChannels * foreGroundMaskBuff->width * foreGroundMaskBuff->height);
+ cv::merge(tmp, foreGroundMaskBuff);
+
+ len = (foreGroundMaskBuff.channels() * foreGroundMaskBuff.size().width * foreGroundMaskBuff.size().height);
m_foreMaskImg.pixels.length(len);
- memcpy((void *)&(m_foreMaskImg.pixels[0]), foreGroundMaskBuff->imageData, len);
+ memcpy((void *)&(m_foreMaskImg.pixels[0]), foreGroundMaskBuff.data, len);
m_foreMaskImg.width = m_image_orig.width;
m_foreMaskImg.height = m_image_orig.height;
m_foreMaskImgOut.write();
+
+ tmp.clear();
+ tmp.push_back(stillObjectMaskImage);
+ tmp.push_back(stillObjectMaskImage);
+ tmp.push_back(stillObjectMaskImage);
-
- cvMerge( stillObjectMaskImage, stillObjectMaskImage, stillObjectMaskImage, NULL, stillObjectMaskBuff );
-
- len = (stillObjectMaskBuff->nChannels * stillObjectMaskBuff->width * stillObjectMaskBuff->height);
+ cv::merge(tmp, stillObjectMaskBuff);
+
+ len = (stillObjectMaskBuff.channels() * stillObjectMaskBuff.size().width * stillObjectMaskBuff.size().height);
m_stillMaskImg.pixels.length(len);
- memcpy((void *)&(m_stillMaskImg.pixels[0]), stillObjectMaskBuff->imageData, len);
+ memcpy((void *)&(m_stillMaskImg.pixels[0]), stillObjectMaskBuff.data, len);
m_stillMaskImg.width = m_image_orig.width;
m_stillMaskImg.height = m_image_orig.height;
@@ -443,10 +463,10 @@
m_stillMaskImgOut.write();
- len = (backgroundImage->nChannels * backgroundImage->width * backgroundImage->height);
+ len = (backgroundImage.channels() * backgroundImage.size().width * backgroundImage.size().height);
m_backGroundImg.pixels.length(len);
- memcpy((void *)&(m_backGroundImg.pixels[0]), backgroundImage->imageData, len);
+ memcpy((void *)&(m_backGroundImg.pixels[0]), backgroundImage.data, len);
m_backGroundImg.width = m_image_orig.width;
m_backGroundImg.height = m_image_orig.height;
@@ -454,10 +474,10 @@
m_backGroundImgOut.write();
- len = (stillObjectImage->nChannels * stillObjectImage->width * stillObjectImage->height);
+ len = (stillObjectImage.channels() * stillObjectImage.size().width * stillObjectImage.size().height);
m_stillImg.pixels.length(len);
- memcpy((void *)&(m_stillImg.pixels[0]), stillObjectImage->imageData, len);
+ memcpy((void *)&(m_stillImg.pixels[0]), stillObjectImage.data, len);
m_stillImg.width = m_image_orig.width;
m_stillImg.height = m_image_orig.height;
@@ -486,38 +506,7 @@
if(g_temp_w != m_image_orig.width || g_temp_h != m_image_orig.height)
{
- cvReleaseImage( &inputImage );
- cvReleaseImage( &backgroundAverageImage );
- cvReleaseImage( &backgroundThresholdImage);
- cvReleaseImage( &stillObjectAverageImage );
- cvReleaseImage( &stillObjectThresholdImage );
- cvReleaseImage( &stillObjectCounterImage );
- cvReleaseImage( &backgroundDifferenceImage );
- cvReleaseImage( &stillObjectDifferenceImage );
- cvReleaseImage( &thresholdImage32 );
- cvReleaseImage( &thresholdImage );
- cvReleaseImage( &resultImage );
- cvReleaseImage( &backgroundMaskImage );
- cvReleaseImage( &foregroundMaskImage );
- cvReleaseImage( &stillObjectMaskImage );
- cvReleaseImage( &movingObjectMask );
- cvReleaseImage( &backgroundCopyMaskImage );
- cvReleaseImage( &tmpMaskImage );
- cvReleaseImage( &tmp2MaskImage );
- cvReleaseImage( &frameImage32 );
- cvReleaseImage( &backgroundImage );
- cvReleaseImage( &stillObjectImage );
- cvReleaseImage( &outputImage );
- cvReleaseImage( &foreGroundMaskBuff);
- cvReleaseImage( &stillObjectMaskBuff);
- cvReleaseImage( &backGroundBuff);
- cvReleaseImage( &stillObjectImageBuff);
- cvReleaseImage( &stillObjectCounterBuff);
-
- //g_temp_w = m_image_orig.width;
- //g_temp_h = m_image_orig.height;
-
SubStractCaptureImage_count = 0;
}
Modified: trunk/ImageProcessing/opencv/components/Template/include/Template/Template.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Template/include/Template/Template.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Template/include/Template/Template.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,8 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
//OpenCV header file include
-#include <cv.h>
-#include <cxcore.h>
-#include <highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
#define THRESHOLD 50 /* 2値化の際の閾値 */
#define THRESHOLD_MAX_VALUE 255 /* 2値化の際に使用する最大値 */
@@ -290,20 +289,20 @@
// <rtc-template block="private_operation">
// </rtc-template>
- int templateID;
+ std::string templateID;
int templateWidth, templateHeight;
int flag;
- IplImage* templateImage;
- IplImage* templateGrayImage;
- IplImage* templateBinaryImage;
+ cv::Mat templateImage;
+ cv::Mat templateGrayImage;
+ cv::Mat templateBinaryImage;
- IplImage* imageBuff;
- IplImage* sourceGrayImage;
- IplImage* sourceBinaryImage;
+ cv::Mat imageBuff;
+ cv::Mat sourceGrayImage;
+ cv::Mat sourceBinaryImage;
- IplImage* differenceMapImage;
+ cv::Mat differenceMapImage;
- CvPoint minLocation;
+ cv::Point minLocation;
int len;
Modified: trunk/ImageProcessing/opencv/components/Template/src/Template.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Template/src/Template.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Template/src/Template.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -113,19 +113,13 @@
{
/* 対象画像用メモリの初期化 */
- templateID = -1;
+ templateID = "";
templateWidth = 0;
templateHeight = 0;
- templateImage = NULL;
- templateGrayImage = NULL;
- templateBinaryImage = NULL;
- /* イメージ用メモリの初期化 */
- imageBuff = NULL;
- sourceGrayImage = NULL;
- sourceBinaryImage = NULL;
- differenceMapImage = NULL;
+
+
/* OutPort1の画面サイズの初期化 */
m_image_template.width = 0;
m_image_template.height = 0;
@@ -140,22 +134,37 @@
RTC::ReturnCode_t Template::onDeactivated(RTC::UniqueId ec_id)
{
- if(imageBuff != NULL)
+ if (!imageBuff.empty())
{
- /* イメージ用メモリの解放 */
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&sourceGrayImage);
- cvReleaseImage(&sourceBinaryImage);
- cvReleaseImage(&differenceMapImage);
+ imageBuff.release();
}
+ if (!sourceGrayImage.empty())
+ {
+ sourceGrayImage.release();
+ }
+ if (!sourceBinaryImage.empty())
+ {
+ sourceBinaryImage.release();
+ }
+ if (!differenceMapImage.empty())
+ {
+ differenceMapImage.release();
+ }
- if( templateImage != NULL )
+ if (!templateImage.empty())
{
- cvReleaseImage(&templateImage);
- cvReleaseImage(&templateGrayImage);
- cvReleaseImage(&templateBinaryImage);
+ templateImage.release();
}
+ if (!templateGrayImage.empty())
+ {
+ templateGrayImage.release();
+ }
+ if (!templateBinaryImage.empty())
+ {
+ templateBinaryImage.release();
+ }
+
return RTC::RTC_OK;
}
@@ -168,48 +177,49 @@
/* InPortデータの読み込み */
m_image_origIn.read();
- /* 対象画像を読み込む */
- templateImage = cvLoadImage( m_img_path, CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR );
+ if (templateID != m_img_path)
+ {
+ /* 対象画像を読み込む */
+ templateImage = cv::imread(m_img_path, CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
+ }
- if( templateImage == NULL )
+ if( templateImage.empty() )
{
- templateID = -1 ;
+ templateID = "";
templateWidth = templateHeight = 0;
}
/* 対象画像チェック */
/* 対象画像のPathとか名が無い場合テンプレートマッチングしなくて入力されたイメージをそのまま出力 */
- if( templateImage != NULL && templateID != templateImage->ID )
+ if (!templateImage.empty() && templateID != m_img_path)
{
+
/* フラッグ設定(正しい対象画像が入力) */
flag = 1;
- templateID = templateImage->ID;
- templateWidth = templateImage->width;
- templateHeight = templateImage->height;
+ templateID = m_img_path;
+ templateWidth = templateImage.size().width;
+ templateHeight = templateImage.size().height;
- if(templateGrayImage != NULL)
- {
- cvReleaseImage(&templateGrayImage);
- cvReleaseImage(&templateBinaryImage);
- }
+
/* 対象画像用のメモリ確保 */
- templateGrayImage = cvCreateImage( cvGetSize(templateImage), IPL_DEPTH_8U, 1 );
- templateBinaryImage = cvCreateImage( cvGetSize(templateImage), IPL_DEPTH_8U, 1 );
+ templateGrayImage.create(templateImage.size(), CV_8UC1);
+ templateBinaryImage.create(templateImage.size(), CV_8UC1);
+
cout << "templateID : "<<templateID<<endl;
cout << "template - width :"<<templateWidth<<endl;
cout << "template - height :"<<templateHeight<<endl;
/* RGBからグレースケールに変換する */
- cvCvtColor( templateImage, templateGrayImage, CV_RGB2GRAY );
+ cv::cvtColor( templateImage, templateGrayImage, CV_RGB2GRAY );
/* グレースケールから2値に変換する */
- cvThreshold( templateGrayImage, templateBinaryImage, THRESHOLD, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ cv::threshold( templateGrayImage, templateBinaryImage, THRESHOLD, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY );
/* OutPort2用の画面サイズ初期化 */
- m_image_picture.width = templateImage->width;
- m_image_picture.height = templateImage->height;
+ m_image_picture.width = templateImage.size().width;
+ m_image_picture.height = templateImage.size().height;
}
/* InPortとOutPortの画面サイズ処理およびイメージ用メモリの確保(正しい対象画像が入れるとdifferenceMapImageが変換される-フラッグを見て判断) */
@@ -219,44 +229,36 @@
m_image_template.width = m_image_orig.width;
m_image_template.height = m_image_orig.height;
- /* InPortのイメージサイズが変更された場合 */
- if(imageBuff != NULL)
- {
- cvReleaseImage(&imageBuff);
- cvReleaseImage(&sourceGrayImage);
- cvReleaseImage(&sourceBinaryImage);
- cvReleaseImage(&differenceMapImage);
- }
+
/* イメージ用メモリの確保 */
- imageBuff = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 3 );
- sourceGrayImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1 );
- sourceBinaryImage = cvCreateImage( cvSize(m_image_orig.width, m_image_orig.height), IPL_DEPTH_8U, 1 );
- differenceMapImage = cvCreateImage( cvSize( m_image_orig.width - templateWidth + 1,
- m_image_orig.height - templateHeight + 1 ), IPL_DEPTH_32F, 1 );
+ imageBuff.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC3);
+ sourceGrayImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ sourceBinaryImage.create(cv::Size(m_image_orig.width, m_image_orig.height), CV_8UC1);
+ differenceMapImage.create(cv::Size(m_image_orig.width - templateWidth + 1, m_image_orig.height - templateHeight + 1), CV_32FC1);
}
/* InPortの画像データをコピー */
- memcpy( imageBuff->imageData, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
+ memcpy( imageBuff.data, (void *)&(m_image_orig.pixels[0]), m_image_orig.pixels.length() );
- if( templateImage != NULL )
+ if( !templateImage.empty() )
{
/* RGBからグレースケールに変換する */
- cvCvtColor( imageBuff, sourceGrayImage, CV_RGB2GRAY );
+ cv::cvtColor( imageBuff, sourceGrayImage, CV_RGB2GRAY );
/* グレースケールから2値に変換する */
- cvThreshold( sourceGrayImage, sourceBinaryImage, THRESHOLD, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
+ cv::threshold( sourceGrayImage, sourceBinaryImage, THRESHOLD, THRESHOLD_MAX_VALUE, cv::THRESH_BINARY );
/* テンプレートマッチングを行う */
- cvMatchTemplate( sourceBinaryImage, templateBinaryImage, differenceMapImage, CV_TM_SQDIFF );
+ cv::matchTemplate( sourceBinaryImage, templateBinaryImage, differenceMapImage, CV_TM_SQDIFF );
/* テンプレートが元画像のどの部分にあるのかという情報を得る */
- cvMinMaxLoc( differenceMapImage, NULL, NULL, &minLocation, NULL, NULL );
+ cv::minMaxLoc( differenceMapImage, NULL, NULL, &minLocation, NULL );
/* 一致する場所を元画像に四角で描く */
- cvRectangle(
+ cv::rectangle(
imageBuff,
minLocation,
- cvPoint( minLocation.x + templateImage->width, minLocation.y + templateImage->height ),
+ cv::Point(minLocation.x + templateImage.size().width, minLocation.y + templateImage.size().height),
CV_RGB( 255, 0, 0 ),
LINE_THICKNESS,
LINE_TYPE,
@@ -264,38 +266,37 @@
);
/* 画像データのサイズ取得 */
- len = imageBuff->nChannels * imageBuff->width * imageBuff->height;
+ len = imageBuff.channels() * imageBuff.size().width * imageBuff.size().height;
m_image_template.pixels.length(len);
/* 反転した画像データをOutPortにコピー */
- memcpy( (void *)&(m_image_template.pixels[0]), imageBuff->imageData, len );
+ memcpy( (void *)&(m_image_template.pixels[0]), imageBuff.data, len );
/* 反転した画像データをOutPortから出力 */
m_image_templateOut.write();
/* 対象画像データのサイズ取得 */
- len = templateImage->nChannels * templateImage->width * templateImage->height;
+ len = templateImage.channels() * templateImage.size().width * templateImage.size().height;
m_image_picture.pixels.length(len);
/* 反転した対象画像データをOutPortにコピー */
- memcpy( (void *)&(m_image_picture.pixels[0]), templateImage->imageData, len );
+ memcpy( (void *)&(m_image_picture.pixels[0]), templateImage.data, len );
m_image_pictureOut.write();
}else{
/* 画像データのサイズ取得 */
- len = imageBuff->nChannels * imageBuff->width * imageBuff->height;
+ len = imageBuff.channels() * imageBuff.size().width * imageBuff.size().height;
m_image_template.pixels.length(len);
/* 反転した画像データをOutPortにコピー */
- memcpy( (void *)&(m_image_template.pixels[0]), imageBuff->imageData, len );
+ memcpy( (void *)&(m_image_template.pixels[0]), imageBuff.data, len );
m_image_templateOut.write();
}
- cvReleaseImage(&templateImage);
}
return RTC::RTC_OK;
}
Modified: trunk/ImageProcessing/opencv/components/Translate/include/Translate/Translate.h
===================================================================
--- trunk/ImageProcessing/opencv/components/Translate/include/Translate/Translate.h 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Translate/include/Translate/Translate.h 2017-02-02 00:05:08 UTC (rev 181)
@@ -20,9 +20,7 @@
#include <rtm/idl/InterfaceDataTypesSkel.h>
/* OpenCV用インクルードファイルのインクルード */
-#include<cv.h>
-#include<cxcore.h>
-#include<highgui.h>
+#include <opencv2/imgproc/imgproc.hpp>
// Service implementation headers
// <rtc-template block="service_impl_h">
@@ -288,10 +286,10 @@
int m_in_height; /* 入力イメージのHeight */
int m_in_width; /* 入力イメージのWidth */
- IplImage* m_image_buff; /* Original Image */
- IplImage* m_image_dest; /* 結果出力用IplImage */
+ cv::Mat m_image_buff; /* Original Image */
+ cv::Mat m_image_dest; /* 結果出力用cv::Mat */
- CvMat *m_transformMatrix;
+ cv::Mat m_transformMatrix;
};
Modified: trunk/ImageProcessing/opencv/components/Translate/src/Translate.cpp
===================================================================
--- trunk/ImageProcessing/opencv/components/Translate/src/Translate.cpp 2016-08-22 02:27:08 UTC (rev 180)
+++ trunk/ImageProcessing/opencv/components/Translate/src/Translate.cpp 2017-02-02 00:05:08 UTC (rev 181)
@@ -110,14 +110,12 @@
RTC::ReturnCode_t Translate::onActivated(RTC::UniqueId ec_id)
{
/* イメージ用メモリの確保 */
- m_image_buff = NULL;
- m_image_dest = NULL;
m_in_height = 0;
m_in_width = 0;
/* 行列を生成する */
- m_transformMatrix = cvCreateMat( 2, 3, CV_32FC1);
+ m_transformMatrix.create( 2, 3, CV_32FC1);
return RTC::RTC_OK;
}
@@ -125,16 +123,23 @@
RTC::ReturnCode_t Translate::onDeactivated(RTC::UniqueId ec_id)
{
- if(m_image_buff != NULL)
+
+
+ if (!m_image_buff.empty())
{
- cvReleaseImage(&m_image_buff);
- }
- if(m_image_dest != NULL)
+ m_image_buff.release();
+ }
+ if (!m_image_dest.empty())
{
- cvReleaseImage(&m_image_dest);
+ m_image_dest.release();
}
- cvReleaseMat(&m_transformMatrix);
+ if (!m_transformMatrix.empty())
+ {
+ m_transformMatrix.release();
+ }
+
+
return RTC::RTC_OK;
}
@@ -155,54 +160,46 @@
m_in_height = m_image_orig.height;
m_in_width = m_image_orig.width;
- if(m_image_buff != NULL)
- {
- cvReleaseImage(&m_image_buff);
- }
- if(m_image_dest != NULL)
- {
- cvReleaseImage(&m_image_dest);
- }
+
/* サイズ変換のためTempメモリーを用意する */
- m_image_buff = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
- m_image_dest = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
+ m_image_buff.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
+ m_image_dest.create(cv::Size(m_in_width, m_in_height), CV_8UC3);
}
/* InPortの画像データをIplImageのimageDataにコピー */
- memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
+ memcpy(m_image_buff.data,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());
// Alternative process
- CvPoint2D32f original[3]; /* 変換前座標 */
- CvPoint2D32f Translate[3]; /* 変換後座標 */
+ cv::Point2f original[3]; /* 変換前座標 */
+ cv::Point2f Translate[3]; /* 変換後座標 */
/* 変換前の座標を設定する */
- original[0] = cvPoint2D32f( 0, 0 );
- original[1] = cvPoint2D32f( m_image_buff->width, 0 );
- original[2] = cvPoint2D32f( 0, m_image_buff->height );
+ original[0] = cv::Point2f(0, 0);
+ original[1] = cv::Point2f(m_image_buff.size().width, 0);
+ original[2] = cv::Point2f(0, m_image_buff.size().height);
/* 変換後の座標を設定する */
- Translate[0] = cvPoint2D32f( m_nTransX, m_nTransY );
- Translate[1] = cvPoint2D32f( m_nTransX + m_image_buff->width, m_nTransY );
- Translate[2] = cvPoint2D32f( m_nTransX, m_nTransY + m_image_buff->height );
+ Translate[0] = cv::Point2f(m_nTransX, m_nTransY);
+ Translate[1] = cv::Point2f(m_nTransX + m_image_buff.size().width, m_nTransY);
+ Translate[2] = cv::Point2f(m_nTransX, m_nTransY + m_image_buff.size().height);
/* 変換行列を求める */
- cvGetAffineTransform( original, Translate, m_transformMatrix );
+ m_transformMatrix = cv::getAffineTransform(original, Translate);
/* 変換行列を反映させる */
- cvWarpAffine( m_image_buff, m_image_dest, m_transformMatrix,
- CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
+ warpAffine(m_image_buff, m_image_dest, m_transformMatrix, m_image_dest.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar::all(0));
// Common process
/* 画像データのサイズ取得 */
- int len = m_image_dest->nChannels * m_image_dest->width * m_image_dest->height;
+ int len = m_image_dest.channels() * m_image_dest.size().width * m_image_dest.size().height;
/* 画面のサイズ情報を入れる */
m_image_output.pixels.length(len);
- m_image_output.width = m_image_dest->width;
- m_image_output.height = m_image_dest->height;
+ m_image_output.width = m_image_dest.size().width;
+ m_image_output.height = m_image_dest.size().height;
/* 反転した画像データをOutPortにコピー */
- memcpy((void *)&(m_image_output.pixels[0]), m_image_dest->imageData,len);
+ memcpy((void *)&(m_image_output.pixels[0]), m_image_dest.data,len);
/* 反転した画像データをOutPortから出力する */
m_image_outputOut.write();
More information about the openrtm-commit
mailing list