#include "StdAfx.h"
#include
#include
#include
#include
using namespace cv;
using namespace std;
int main()
{
string imageName = “lena.jpg”;
//读入图像
Mat img = imread(imageName, CV_LOAD_IMAGE_COLOR);
//如果读入图像失败
if (img.empty())
{
cout<<”Could not open or find the image!”<
#include "StdAfx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
const char* imageName = "lena.jpg";
Mat image = imread(imageName, 1);
if (!image.data)
{
cout << "Could not open or find the image!" << endl;
return -1;
}
imshow("image", image);
waitKey(0);
return 0;
}
Dilation example
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main( )
{
//load the source image
Mat image = imread("1.jpg");
//create the windows
namedWindow("Original - dilation");
namedWindow("Result - dilation");
//show the source image
imshow("Original - dilation", image);
//get a custom kernel
Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
Mat out;
//perform the dilation
dilate(image,out, element);
//show the result
imshow("Result - dilation", out);
waitKey(0);
return 0;
}
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main( )
{
//load the source image
Mat image = imread("1.jpg");
//create the windows
namedWindow("Original - erosion");
namedWindow("Result - erosion");
//show the source image
imshow("Original - erosion", image);
//get a custom kernel
Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
Mat out;
//perform the erosion
erode(image,out, element);
//show the result
imshow("Result - erosion", out);
waitKey(0);
return 0;
}
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <cstdio>
#include <cstdlib>
using namespace std;
using namespace cv;
Mat g_srcImage, g_dstImage; //source image and result image
int g_nTrackbarNumer = 0; //0 means erosion, 1 means dilation
int g_nStructElementSize = 3; //size of the structuring element (kernel matrix)
void Process(); //processing routine shared by erosion and dilation
void on_TrackbarNumChange(int, void *); //trackbar callback
void on_ElementSizeChange(int, void *); //trackbar callback
int main( )
{
//change the console text color
system("color 5E");
//load the source image
g_srcImage = imread("1.jpg");
if(!g_srcImage.data ) { printf("Oh no, failed to read srcImage!\n"); return -1; }
//show the original image
namedWindow("Original");
imshow("Original", g_srcImage);
//perform an initial erosion and show the result
namedWindow("Result");
//get a custom kernel
Mat element = getStructuringElement(MORPH_RECT, Size(2*g_nStructElementSize+1, 2*g_nStructElementSize+1), Point( g_nStructElementSize, g_nStructElementSize ));
erode(g_srcImage, g_dstImage, element);
imshow("Result", g_dstImage);
//create the trackbars
createTrackbar("Erode/Dilate", "Result", &g_nTrackbarNumer, 1, on_TrackbarNumChange);
createTrackbar("Kernel size", "Result", &g_nStructElementSize, 21, on_ElementSizeChange);
//print some usage hints
cout << "\n\tPress 'q' to quit; use the trackbars to switch the operation and change the kernel size\n" << endl;
//poll the keyboard until 'q' is pressed
while (char(waitKey(1)) != 'q') {}
return 0;
}
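The definitions of Process() and the two trackbar callbacks are cut off in this excerpt; the following is a minimal sketch, assuming they simply rebuild the kernel from the current trackbar values and redraw the "Result" window, consistent with how main() uses them:
//rebuild the structuring element from the current trackbar value and redo the operation
void Process()
{
Mat element = getStructuringElement(MORPH_RECT, Size(2*g_nStructElementSize+1, 2*g_nStructElementSize+1), Point(g_nStructElementSize, g_nStructElementSize));
//0 selects erosion, 1 selects dilation
if (g_nTrackbarNumer == 0)
erode(g_srcImage, g_dstImage, element);
else
dilate(g_srcImage, g_dstImage, element);
imshow("Result", g_dstImage);
}
//called when the erode/dilate switch changes
void on_TrackbarNumChange(int, void *)
{
Process();
}
//called when the kernel size changes
void on_ElementSizeChange(int, void *)
{
Process();
}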
#include "cv.h"
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
using namespace std;
using namespace cv;
#define TYPE_MORPH_RECT (0)
#define TYPE_MORPH_CROSS (1)
#define TYPE_MORPH_ELLIPSE (2)
#define MAX_ELE_TYPE (2)
#define MAX_ELE_SIZE (20)
Mat src, erode_dst, dilate_dst;
const char *erode_wn = "eroding demo";
const char *dilate_wn = "dilating demo";
int erode_ele_type;
int dilate_ele_type;
int erode_ele_size;
int dilate_ele_size;
static void Erosion(int, void *);
static void Dilation(int, void *);
/*
* @brief
* @inputs
* @outputs
* @retval
*/
int main(int argc, char *argv[])
{
if (argc < 2) {
cout << "Usage: ./eroding_and_dilating [file name]" << endl;
return -1;
}
src = imread(argv[1]);
if (!src.data) {
cout << "Failed to load image: " << argv[1] << endl;
return -1;
}
// erosion window and its trackbars
namedWindow(erode_wn, CV_WINDOW_AUTOSIZE);
createTrackbar("Element Type (0:Rect 1:Cross 2:Ellipse)", erode_wn, &erode_ele_type, MAX_ELE_TYPE, Erosion);
createTrackbar("Element Size (2n+1)", erode_wn, &erode_ele_size, MAX_ELE_SIZE, Erosion);
// dilation window and its trackbars
namedWindow(dilate_wn, CV_WINDOW_AUTOSIZE);
createTrackbar("Element Type (0:Rect 1:Cross 2:Ellipse)", dilate_wn, &dilate_ele_type, MAX_ELE_TYPE, Dilation);
createTrackbar("Element Size (2n+1)", dilate_wn, &dilate_ele_size, MAX_ELE_SIZE, Dilation);
// show the initial results
Erosion(0, 0);
Dilation(0, 0);
waitKey(0);
return 0;
}
#include "widget.h"
#include "ui_widget.h"
#include <QFileDialog>
#include <QDebug>
Widget::Widget(QWidget *parent) :
QWidget(parent),
ui(new Ui::Widget)
{
ui->setupUi(this);
}
Widget::~Widget()
{
delete ui;
}
void Widget::on_openButton_clicked()
{
QString fileName = QFileDialog::getOpenFileName(this,tr("Open Image"),
".",tr("Image Files (*.png *.jpg *.bmp)"));
qDebug()<<"filenames:"<imgfilelabel->setText(fileName);
//here use 2 ways to make a copy
// image.copyTo(originalimg); //make a copy
originalimg = image.clone(); //clone the img
qimg = Widget::Mat2QImage(image);
display(qimg); //display by the label
if(image.data)
{
ui->saltButton->setEnabled(true);
ui->originalButton->setEnabled(true);
ui->reduceButton->setEnabled(true);
}
}
QImage Widget::Mat2QImage(const cv::Mat &mat)
{
QImage img;
if(mat.channels()==3)
{
//cvt Mat BGR 2 QImage RGB
cvtColor(mat,rgb,CV_BGR2RGB);
img =QImage((const unsigned char*)(rgb.data),
rgb.cols,rgb.rows,
rgb.cols*rgb.channels(),
QImage::Format_RGB888);
}
else
{
img =QImage((const unsigned char*)(mat.data),
mat.cols,mat.rows,
mat.cols*mat.channels(),
QImage::Format_RGB888);
}
return img;
}
void Widget::display(QImage img)
{
QImage imgScaled;
imgScaled = img.scaled(ui->imagelabel->size(),Qt::KeepAspectRatio);
// imgScaled = img.QImage::scaled(ui->imagelabel->width(),ui->imagelabel->height(),Qt::KeepAspectRatio);
ui->imagelabel->setPixmap(QPixmap::fromImage(imgScaled));
}
void Widget::on_originalButton_clicked()
{
qimg = Widget::Mat2QImage(originalimg);
display(qimg);
}
void Widget::on_saltButton_clicked()
{
salt(image,3000);
qimg = Widget::Mat2QImage(image);
display(qimg);
}
void Widget::on_reduceButton_clicked()
{
colorReduce0(image,64);
qimg = Widget::Mat2QImage(image);
display(qimg);
}
void Widget::salt(cv::Mat &image, int n)
{
int i,j;
for (int k=0; k<n; k++)
{
//pick a random pixel position
i = qrand()%image.cols;
j = qrand()%image.rows;
if (image.channels() == 1)
{ // gray-level image
image.at<uchar>(j,i)= 255;
}
else if (image.channels() == 3)
{ // color image
image.at<cv::Vec3b>(j,i)[0]= 255;
image.at<cv::Vec3b>(j,i)[1]= 255;
image.at<cv::Vec3b>(j,i)[2]= 255;
}
}
}
// using .ptr and []
void Widget::colorReduce0(cv::Mat &image, int div)
{
int nl= image.rows; // number of lines
int nc= image.cols * image.channels(); // total number of elements per line
for (int j=0; j<nl; j++)
{
uchar* data= image.ptr<uchar>(j);
for (int i=0; i<nc; i++)
{
//quantize each value to a multiple of div
data[i]= data[i]/div*div + div/2;
}
}
}
#ifndef WIDGET_H
#define WIDGET_H
#include <QWidget>
#include <QImage>
#include <QFileDialog>
#include <QDebug>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
namespace Ui {
class Widget;
}
class Widget : public QWidget
{
Q_OBJECT
public:
explicit Widget(QWidget *parent = 0);
~Widget();
private slots:
void on_openButton_clicked();
QImage Mat2QImage(const cv::Mat &mat);
void display(QImage image);
void salt(cv::Mat &image, int n);
void on_saltButton_clicked();
void on_reduceButton_clicked();
void colorReduce0(cv::Mat &image, int div);
void on_originalButton_clicked();
private:
Ui::Widget *ui;
cv::Mat image;
cv::Mat originalimg; //store the original img
QImage qimg;
QImage imgScaled;
cv::Mat rgb;
};
#endif // WIDGET_H
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cmath>
// using .ptr and []
void colorReduce0(cv::Mat &image, int div=64) {
int nl= image.rows; // number of lines
int nc= image.cols * image.channels(); // total number of elements per line
for (int j=0; j<nl; j++) {
uchar* data= image.ptr<uchar>(j);
for (int i=0; i<nc; i++) {
// process each pixel ---------------------
data[i]= data[i]/div*div + div/2;
// end of pixel processing ----------------
}
}
}
// using .ptr and * and bitwise
void colorReduce3(cv::Mat &image, int div=64) {
int nl= image.rows; // number of lines
int nc= image.cols * image.channels(); // total number of elements per line
// div must be a power of 2
int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
// mask used to round the pixel value
uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
for (int j=0; j<nl; j++) {
uchar* data= image.ptr<uchar>(j);
for (int i=0; i<nc; i++) {
// process each pixel ---------------------
*data= (*data & mask) + div/2;
data++;
// end of pixel processing ----------------
}
}
}
// direct pointer arithmetic using the effective width (step)
void colorReduce4(cv::Mat &image, int div=64) {
int nl= image.rows; // number of lines
int nc= image.cols * image.channels(); // total number of elements per line
// div must be a power of 2
int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
int step= image.step; // effective width
// mask used to round the pixel value
uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
// get the pointer to the image buffer
uchar *data= image.data;
for (int j=0; j<nl; j++) {
for (int i=0; i<nc; i++) {
// process each pixel ---------------------
*(data+i)= (*(data+i) & mask) + div/2;
// end of pixel processing ----------------
}
data+= step; // next line
}
}
// using Mat_ iterator
void colorReduce8(cv::Mat &image, int div=64) {
// get iterators
cv::Mat_<cv::Vec3b>::iterator it= image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::iterator itend= image.end<cv::Vec3b>();
for ( ; it!= itend; ++it) {
// process each pixel ---------------------
(*it)[0]= (*it)[0]/div*div + div/2;
(*it)[1]= (*it)[1]/div*div + div/2;
(*it)[2]= (*it)[2]/div*div + div/2;
// end of pixel processing ----------------
}
}
// using Mat_ iterator and bitwise
void colorReduce9(cv::Mat &image, int div=64) {
// div must be a power of 2
int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
// mask used to round the pixel value
uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
// get iterators
cv::Mat_<cv::Vec3b>::iterator it= image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::iterator itend= image.end<cv::Vec3b>();
// scan all pixels
for ( ; it!= itend; ++it) {
// process each pixel ---------------------
(*it)[0]= ((*it)[0]&mask) + div/2;
(*it)[1]= ((*it)[1]&mask) + div/2;
(*it)[2]= ((*it)[2]&mask) + div/2;
// end of pixel processing ----------------
}
}
// using MatIterator_
void colorReduce10(cv::Mat &image, int div=64) {
// get iterators
cv::Mat_<cv::Vec3b> cimage= image;
cv::Mat_<cv::Vec3b>::iterator it=cimage.begin();
cv::Mat_<cv::Vec3b>::iterator itend=cimage.end();
for ( ; it!= itend; it++) {
// process each pixel ---------------------
(*it)[0]= (*it)[0]/div*div + div/2;
(*it)[1]= (*it)[1]/div*div + div/2;
(*it)[2]= (*it)[2]/div*div + div/2;
// end of pixel processing ----------------
}
}
// using the at method
void colorReduce11(cv::Mat &image, int div=64) {
int nl= image.rows; // number of lines
int nc= image.cols; // number of columns
for (int j=0; j<nl; j++) {
for (int i=0; i<nc; i++) {
// process each pixel ---------------------
image.at<cv::Vec3b>(j,i)[0]=image.at<cv::Vec3b>(j,i)[0]/div*div + div/2;
image.at<cv::Vec3b>(j,i)[1]=image.at<cv::Vec3b>(j,i)[1]/div*div + div/2;
image.at<cv::Vec3b>(j,i)[2]=image.at<cv::Vec3b>(j,i)[2]/div*div + div/2;
// end of pixel processing ----------------
} // end of line
}
}
// with input/output images
void colorReduce12(const cv::Mat &image, // input image
cv::Mat &result, // output image
int div=64) {
int nl= image.rows; // number of lines
int nc= image.cols; // number of columns
// allocate output image if necessary
result.create(image.rows,image.cols,image.type());
// created images have no padded pixels
nc= nc*nl;
nl= 1; // it is now a 1D array
// div must be a power of 2
int n= static_cast<int>(log(static_cast<double>(div))/log(2.0));
// mask used to round the pixel value
uchar mask= 0xFF<<n; // e.g. for div=16, mask= 0xF0
for (int j=0; j<nl; j++) {
uchar* data= result.ptr<uchar>(j);
const uchar* idata= image.ptr<uchar>(j);
for (int i=0; i<nc*image.channels(); i++) {
// process each pixel ---------------------
*data++= (*idata++ & mask) + div/2;
// end of pixel processing ----------------
}
}
}
for (int i=0; i(log(static_cast(div))/log(2.0));
// mask used to round the pixel value
uchar mask= 0xFF<
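None of the colorReduce variants above is invoked in this excerpt; a minimal usage sketch (the file name lena.jpg is an assumption) could look like:
int main()
{
cv::Mat image = cv::imread("lena.jpg");
if (image.empty())
return -1;
//quantize each channel to steps of 64
colorReduce0(image, 64);
cv::imshow("color reduced", image);
cv::waitKey(0);
return 0;
}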
sharp.h
#pragma once
#include <opencv2/core/core.hpp>
using namespace cv;
namespace ggicci
{
void sharpen(const Mat& img, Mat& result);
}
#include "sharp.h"
void ggicci::sharpen(const Mat& img, Mat& result)
{
result.create(img.size(), img.type());
//process the pixels inside the border; the outermost pixels are handled separately below
for (int row = 1; row < img.rows-1; row++)
{
//previous row
const uchar* previous = img.ptr(row-1);
//current row being processed
const uchar* current = img.ptr(row);
//next row
const uchar* next = img.ptr(row+1);
uchar *output = result.ptr(row);
int ch = img.channels();
int starts = ch;
int ends = (img.cols - 1) * ch;
//skip the first pixel of the output row so output stays aligned with col
output += starts;
for (int col = starts; col < ends; col++)
{
//the output pointer advances in step with col, one increment per channel of each pixel,
//which is why the channel count has to be taken into account
*output++ = saturate_cast<uchar>(5 * current[col] - current[col-ch] - current[col+ch] - previous[col] - next[col]);
}
} //end loop
//handle the border: set the outermost pixels to 0
result.row(0).setTo(Scalar::all(0));
result.row(result.rows-1).setTo(Scalar::all(0));
result.col(0).setTo(Scalar::all(0));
result.col(result.cols-1).setTo(Scalar::all(0));
}
#include <opencv2/opencv.hpp>
#pragma comment(lib, "opencv_core231d.lib")
#pragma comment(lib, "opencv_highgui231d.lib")
#pragma comment(lib, "opencv_imgproc231d.lib")
using namespace cv;
#include "sharp.h"
int main()
{
Mat lena = imread("lena.jpg");
Mat sharpenedLena;
ggicci::sharpen(lena, sharpenedLena);
imshow("lena", lena);
imshow("sharpened lena", sharpenedLena);
cvWaitKey();
return 0;
}
int main()
{
Mat lena = imread("lena.jpg");
Mat sharpenedLena;
Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);
imshow("lena", lena);
imshow("sharpened lena", sharpenedLena);
cvWaitKey();
return 0;
}
int main()
{
Mat img = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat* arrays = &img;
int narrays = 1;
int channels[] = { 0 };
InputArray mask = noArray();
Mat hist;
int dims = 1;
int histSize[] = { 256 };
float hranges[] = { 0.0, 255.0 };
const float *ranges[] = { hranges };
//call calcHist to compute the histogram; the result is stored in hist
calcHist(arrays, narrays, channels, mask, hist, dims, histSize, ranges);
//call a simple helper function I wrote that renders the histogram data as an image;
//its inputs are the histogram data hist and the desired image size
Mat histImg = ggicci::getHistogram1DImage(hist, Size(600, 420));
imshow("lena gray image histogram", histImg);
waitKey();
}
Mat ggicci::getHistogram1DImage(const Mat& hist, Size imgSize)
{
Mat histImg(imgSize, CV_8UC3);
int Padding = 10;
int W = imgSize.width - 2 * Padding;
int H = imgSize.height - 2 * Padding;
double _max;
minMaxLoc(hist, NULL, &_max);
double Per = (double)H / _max;
const Point Orig(Padding, imgSize.height-Padding);
int bin = W / (hist.rows + 2);
//draw the bars
for (int i = 1; i <= hist.rows; i++)
{
Point pBottom(Orig.x + i * bin, Orig.y);
Point pTop(pBottom.x, pBottom.y - Per * hist.at<float>(i-1));
line(histImg, pBottom, pTop, Scalar(255, 0, 0), bin);
}
//draw 3 red lines to mark the histogram region
line(histImg, Point(Orig.x + bin, Orig.y - H), Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
line(histImg, Point(Orig.x + bin, Orig.y), Point(Orig.x + bin, Orig.y - H), Scalar(0, 0, 255), 1);
line(histImg, Point(Orig.x + hist.rows * bin, Orig.y), Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
drawArrow(histImg, Orig, Orig+Point(W, 0), 10, 30, Scalar::all(0), 2);
drawArrow(histImg, Orig, Orig-Point(0, H), 10, 30, Scalar::all(0), 2);
return histImg;
}
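drawArrow() is called above but its definition is not part of this excerpt; a plausible helper with a matching parameter list (start point, end point, arrow-head length, arrow-head angle in degrees, color, thickness), which would have to be declared before getHistogram1DImage(), might be:
void drawArrow(Mat& img, Point pStart, Point pEnd, int len, int alpha, Scalar color, int thickness = 1, int lineType = 8)
{
const double PI = 3.1415926;
//main segment
line(img, pStart, pEnd, color, thickness, lineType);
//angle of the main segment
double angle = atan2((double)(pStart.y - pEnd.y), (double)(pStart.x - pEnd.x));
//the two short segments that form the arrow head
Point arrow;
arrow.x = pEnd.x + cvRound(len * cos(angle + PI * alpha / 180));
arrow.y = pEnd.y + cvRound(len * sin(angle + PI * alpha / 180));
line(img, pEnd, arrow, color, thickness, lineType);
arrow.x = pEnd.x + cvRound(len * cos(angle - PI * alpha / 180));
arrow.y = pEnd.y + cvRound(len * sin(angle - PI * alpha / 180));
line(img, pEnd, arrow, color, thickness, lineType);
}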
#include "stdafx.h"
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc ,char ** argv)
{
IplImage *scr=0;
IplImage *dst=0;
double scale=4;
CvSize dst_cvsize;
if (argc==2&&(scr=cvLoadImage(argv[1],-1))!=0)
{
dst_cvsize.width=(int)(scr->width*scale);
dst_cvsize.height=(int)(scr->height*scale);
dst=cvCreateImage(dst_cvsize,scr->depth,scr->nChannels);
cvResize(scr,dst,CV_INTER_NN);
// CV_INTER_NN - nearest-neighbor interpolation
// CV_INTER_LINEAR - bilinear interpolation (used by default)
// CV_INTER_AREA - resampling using pixel area relation; avoids moire artifacts when the image is shrunk,
// and behaves like CV_INTER_NN when the image is enlarged
// CV_INTER_CUBIC - bicubic interpolation
cvNamedWindow("scr",CV_WINDOW_AUTOSIZE);
cvNamedWindow("dst",CV_WINDOW_AUTOSIZE);
cvShowImage("scr",scr);
cvShowImage("dst",dst);
cvWaitKey();
cvReleaseImage(&scr);
cvReleaseImage(&dst);
cvDestroyWindow("scr");
cvDestroyWindow("dst");
}
return 0;
}
#include <opencv2/opencv.hpp>
#include <cstdio>
using namespace cv;
using namespace std;
int main(int argc, char ** argv)
{
// input args check
if(argc < 3){
printf("please input args.\n");
printf("e.g. : ./test infilepath outfilepath \n");
return 0;
}
char * input = argv[1];
char * output = argv[2];
printf("input: %s, output: %s\n", input, output);
Mat src = imread(input, 1);
int width=src.cols;
int heigh=src.rows;
RNG rng;
Mat img(src.size(),CV_8UC3);
for (int y=0; y<heigh; y++)
{
uchar* P0 = src.ptr<uchar>(y);
uchar* P1 = img.ptr<uchar>(y);
for (int x=0; x<width; x++)
{
//apply the desired per-pixel transform here (placeholder: pass the source values through)
float newB = P0[3*x];
float newG = P0[3*x+1];
float newR = P0[3*x+2];
//clamp to the valid 8-bit range
if(newB<0)newB=0;
if(newB>255)newB=255;
if(newG<0)newG=0;
if(newG>255)newG=255;
if(newR<0)newR=0;
if(newR>255)newR=255;
P1[3*x] = (uchar)newB;
P1[3*x+1] = (uchar)newG;
P1[3*x+2] = (uchar)newR;
}
}
//imshow("out",img);
waitKey();
imwrite(output,img);
}
#include <cv.h>
#include <highgui.h>
#pragma comment( lib, "cv.lib" )
#pragma comment( lib, "cxcore.lib" )
#pragma comment( lib, "highgui.lib" )
int main()
{
IplImage *org=cvLoadImage("1.jpg",1);
IplImage *image=cvCloneImage(org);
int width=image->width;
int height=image->height;
int step=image->widthStep;
int channel=image->nChannels;
uchar* data=(uchar *)image->imageData;
for(int i=0;i<width;i++)
{
for(int j=0;j<height;j++)
{
for(int k=0;k<channel;k++)
{
//assumed linear contrast stretch about mid-gray, standing in for the original per-pixel formula
int temp = (int)((data[j*step+i*channel+k]-128)*1.5+128);
if(temp>255)
{
data[j*step+i*channel+k]=255;
}
else if(temp<0)
{
data[j*step+i*channel+k]=0;
}
else
{
data[j*step+i*channel+k]=temp;
}
}
}
}
cvNamedWindow("original",1);
cvShowImage("original",org);
cvNamedWindow("image",1);
cvShowImage("image",image);
cvWaitKey(0);
cvDestroyAllWindows();
cvReleaseImage(&image);
cvReleaseImage(&org);
return 0;
}
#include <cv.h>
#include <highgui.h>
#pragma comment( lib, "cv.lib" )
#pragma comment( lib, "cxcore.lib" )
#pragma comment( lib, "highgui.lib" )
int main()
{
IplImage *org=cvLoadImage("lena.jpg",1);
IplImage *image=cvCloneImage(org);
int width=image->width;
int height=image->height;
int step=image->widthStep;
int channel=image->nChannels;
uchar* data=(uchar *)image->imageData;
int sign=-1;
for(int i=0;i<width;i++)
{
for(int j=0;j<height;j++)
{
for(int k=0;k<channel;k++)
{
//per-pixel processing omitted
}
}
}
cvNamedWindow("original",1);
cvShowImage("original",org);
cvNamedWindow("image",1);
cvShowImage("image",image);
cvWaitKey(0);
cvDestroyAllWindows();
cvReleaseImage(&image);
cvReleaseImage(&org);
return 0;
}
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include "ComputeTime.h"
#include "windows.h"
using namespace std;
using namespace cv;
static void help()
{
cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a
region\n"
"and then grabcut will attempt to segment it out.\n"
"Call:\n"
"./grabcut \n"
"\nSelect a rectangular area around the object you want to segment\n" <<
"\nHot keys: \n"
"\tESC - quit the program\n"
"\tr - restore the original image\n"
"\tn - next iteration\n"
"\n"
"\tleft mouse button - set rectangle\n"
"\n"
"\tCTRL+left mouse button - set GC_BGD pixels\n"
"\tSHIFT+left mouse button - set CG_FGD pixels\n"
"\n"
"\tCTRL+right mouse button - set GC_PR_BGD pixels\n"
"\tSHIFT+right mouse button - set CG_PR_FGD pixels\n" << endl;
}
const Scalar RED = Scalar(0,0,255);
const Scalar PINK = Scalar(230,130,255);
const Scalar BLUE = Scalar(255,0,0);
const Scalar LIGHTBLUE = Scalar(255,255,160);
const Scalar GREEN = Scalar(0,255,0);
const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY; //Ctrl key
const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY; //Shift key
static void getBinMask( const Mat& comMask, Mat& binMask )
{
if( comMask.empty() || comMask.type()!=CV_8UC1 )
CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols )
binMask.create( comMask.size(), CV_8UC1 );
binMask = comMask & 1; //keep only the lowest bit of the mask, i.e. the pixels that are definite or probable foreground
}
class GCApplication
{
public:
enum{ NOT_SET = 0, IN_PROCESS = 1, SET = 2 };
static const int radius = 2;
static const int thickness = -1;
void reset();
void setImageAndWinName( const Mat& _image, const string& _winName );
void showImage() const;
void mouseClick( int event, int x, int y, int flags, void* param );
int nextIter();
int getIterCount() const { return iterCount; }
private:
void setRectInMask();
void setLblsInMask( int flags, Point p, bool isPr );
const string* winName;
const Mat* image;
Mat mask;
Mat bgdModel, fgdModel;
uchar rectState, lblsState, prLblsState;
bool isInitialized;
Rect rect;
vector<Point> fgdPxls, bgdPxls, prFgdPxls, prBgdPxls;
int iterCount;
};
/*reset the class member variables*/
void GCApplication::reset()
{
if( !mask.empty() )
mask.setTo(Scalar::all(GC_BGD));
bgdPxls.clear(); fgdPxls.clear();
prBgdPxls.clear(); prFgdPxls.clear();
isInitialized = false;
rectState = NOT_SET; //NOT_SET == 0
lblsState = NOT_SET;
prLblsState = NOT_SET;
iterCount = 0;
}
/*simply assigns the class member variables*/
void GCApplication::setImageAndWinName( const Mat& _image, const string& _winName )
{
if( _image.empty() || _winName.empty() )
return;
image = &_image;
winName = &_winName;
mask.create( image->size(), CV_8UC1);
reset();
}
/*show the four kinds of marked points, the rectangle and the image content; many later steps use this, so it is factored into its own function*/
void GCApplication::showImage() const
{
if( image->empty() || winName->empty() )
return;
Mat res;
Mat binMask;
if( !isInitialized )
image->copyTo( res );
else
{
getBinMask( mask, binMask );
image->copyTo( res, binMask ); //copy according to whether the lowest mask bit is 0 or 1, keeping only the parts related to the foreground, e.g. probable foreground and probable background
}
vector<Point>::const_iterator it;
/*the next four loops draw the four kinds of selected points in different colors*/
for( it = bgdPxls.begin(); it != bgdPxls.end(); ++it ) //an iterator can be treated as a pointer
circle( res, *it, radius, BLUE, thickness );
for( it = fgdPxls.begin(); it != fgdPxls.end(); ++it ) //definite foreground drawn in red
circle( res, *it, radius, RED, thickness );
for( it = prBgdPxls.begin(); it != prBgdPxls.end(); ++it )
circle( res, *it, radius, LIGHTBLUE, thickness );
for( it = prFgdPxls.begin(); it != prFgdPxls.end(); ++it )
circle( res, *it, radius, PINK, thickness );
/*draw the rectangle*/
if( rectState == IN_PROCESS || rectState == SET )
rectangle( res, Point( rect.x, rect.y ), Point(rect.x + rect.width, rect.y +
rect.height ), GREEN, 2);
imshow( *winName, res );
}
/*after this step, the mask is 3 inside rect and 0 everywhere outside*/
void GCApplication::setRectInMask()
{
assert( !mask.empty() );
mask.setTo( GC_BGD ); //GC_BGD == 0
rect.x = max(0, rect.x);
rect.y = max(0, rect.y);
rect.width = min(rect.width, image->cols-rect.x);
rect.height = min(rect.height, image->rows-rect.y);
(mask(rect)).setTo( Scalar(GC_PR_FGD) ); //GC_PR_FGD == 3, the inside of the rectangle is probable foreground
}
void GCApplication::setLblsInMask( int flags, Point p, bool isPr )
{
vector<Point> *bpxls, *fpxls;
uchar bvalue, fvalue;
if( !isPr ) //definite points
{
bpxls = &bgdPxls;
fpxls = &fgdPxls;
bvalue = GC_BGD; //0
fvalue = GC_FGD; //1
}
else //probable points
{
bpxls = &prBgdPxls;
fpxls = &prFgdPxls;
bvalue = GC_PR_BGD; //2
fvalue = GC_PR_FGD; //3
}
if( flags & BGD_KEY )
{
bpxls->push_back(p);
circle( mask, p, radius, bvalue, thickness ); //set the mask at this point to the background value
}
if( flags & FGD_KEY )
{
fpxls->push_back(p);
circle( mask, p, radius, fvalue, thickness ); //set the mask at this point to the foreground value
}
}
/*mouse callback; the flags parameter is a combination of CV_EVENT_FLAG values*/
void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
{
// TODO add bad args check
switch( event )
{
case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels
{
bool isb = (flags & BGD_KEY) != 0,
isf = (flags & FGD_KEY) != 0;
if( rectState == NOT_SET && !isb && !isf ) //only the left button is pressed
{
rectState = IN_PROCESS; //the rectangle is being drawn
rect = Rect( x, y, 1, 1 );
}
if ( (isb || isf) && rectState == SET ) //CTRL or SHIFT is held and the rectangle is already set: drawing definite foreground/background points
lblsState = IN_PROCESS;
}
break;
case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels
{
bool isb = (flags & BGD_KEY) != 0,
isf = (flags & FGD_KEY) != 0;
if ( (isb || isf) && rectState == SET ) //drawing probable foreground/background points
prLblsState = IN_PROCESS;
}
break;
case CV_EVENT_LBUTTONUP:
if( rectState == IN_PROCESS )
{
rect = Rect( Point(rect.x, rect.y), Point(x,y) ); //the rectangle is finished
rectState = SET;
setRectInMask();
assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() &&
prFgdPxls.empty() );
showImage();
}
if( lblsState == IN_PROCESS ) //definite foreground/background points have been drawn
{
setLblsInMask(flags, Point(x,y), false); //draw definite points
lblsState = SET;
showImage();
}
break;
case CV_EVENT_RBUTTONUP:
if( prLblsState == IN_PROCESS )
{
setLblsInMask(flags, Point(x,y), true); //draw probable points
prLblsState = SET;
showImage();
}
break;
case CV_EVENT_MOUSEMOVE:
if( rectState == IN_PROCESS )
{
rect = Rect( Point(rect.x, rect.y), Point(x,y) );
assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() &&
prFgdPxls.empty() );
showImage(); //keep refreshing the image
}
else if( lblsState == IN_PROCESS )
{
setLblsInMask(flags, Point(x,y), false);
showImage();
}
else if( prLblsState == IN_PROCESS )
{
setLblsInMask(flags, Point(x,y), true);
showImage();
}
break;
}
}
/*runs the grabCut algorithm and returns the number of iterations performed*/
int GCApplication::nextIter()
{
if( isInitialized )
//run one iteration of grabCut; parameter 2 is the mask, whose bits mark all points inside
//the rectangle except those that are probably or definitely background; the mask is also
//the output and stores the segmented foreground
grabCut( *image, mask, rect, bgdModel, fgdModel, 1 );
else
{
if( rectState != SET )
return iterCount;
if( lblsState == SET || prLblsState == SET )
grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_MASK );
else
grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT );
isInitialized = true;
}
iterCount++;
bgdPxls.clear(); fgdPxls.clear();
prBgdPxls.clear(); prFgdPxls.clear();
return iterCount;
}
GCApplication gcapp;
static void on_mouse( int event, int x, int y, int flags, void* param )
{
gcapp.mouseClick( event, x, y, flags, param );
}
int main( int argc, char** argv )
{
string filename;
cout<<" Grabcuts ! \n";
cout<<"input image name: "<>filename;
Mat image = imread( filename, 1 );
if( image.empty() )
{
cout << "\n Durn, couldn't read image filename " << filename << endl;
return 1;
}
help();
const string winName = "image";
cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );
cvSetMouseCallback( winName.c_str(), on_mouse, 0 );
gcapp.setImageAndWinName( image, winName );
gcapp.showImage();
for(;;)
{
int c = cvWaitKey(0);
switch( (char) c )
{
case '\x1b':
cout << "Exiting ..." << endl;
goto exit_main;
case 'r':
cout << endl;
gcapp.reset();
gcapp.showImage();
break;
case 'n':
{
ComputeTime ct;
ct.Begin();
int iterCount = gcapp.getIterCount();
cout << "<" << iterCount << "... ";
int newIterCount = gcapp.nextIter();
if( newIterCount > iterCount )
{
gcapp.showImage();
cout << iterCount << ">" << endl;
cout << "running time: " << ct.End() << endl;
}
else
cout << "rect must be determined>" << endl;
}
break;
}
}
exit_main:
cvDestroyWindow( winName.c_str() );
return 0;
}
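ComputeTime.h is a project-specific timing helper that is not reproduced here; a minimal Windows-based sketch consistent with the ct.Begin()/ct.End() calls above (End() is assumed to return the elapsed time in milliseconds) is:
class ComputeTime
{
public:
void Begin()
{
//capture the counter frequency and the start tick
QueryPerformanceFrequency(&m_freq);
QueryPerformanceCounter(&m_start);
}
double End() //elapsed milliseconds since Begin()
{
LARGE_INTEGER now;
QueryPerformanceCounter(&now);
return (now.QuadPart - m_start.QuadPart) * 1000.0 / m_freq.QuadPart;
}
private:
LARGE_INTEGER m_freq, m_start;
};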
LazySnapping.cpp
#include "stdafx.h"
#include <cv.h>
#include <highgui.h>
#include "graph.h"
#include <vector>
#include <iostream>
#include <cmath>
#include <cstdio>
using namespace std;
typedef Graph<float,float,float> GraphType;
class LasySnapping
{
public :
LasySnapping();
~LasySnapping()
{
if(graph)
{
delete graph;
}
};
private :
vector<CvPoint> forePts;
vector<CvPoint> backPts;
IplImage* image;
// average color of foreground points
unsigned char avgForeColor[3];
// average color of background points
unsigned char avgBackColor[3];
public :
void setImage(IplImage* image)
{
this->image = image;
graph = new GraphType(image->width*image->height,image->width*image->height*2);
}
// include-pen locus
void setForegroundPoints(vector<CvPoint> pts)
{
forePts.clear();
for(int i =0; i< pts.size(); i++)
{
if(!isPtInVector(pts[i],forePts))
{
forePts.push_back(pts[i]);
}
}
if(forePts.size() == 0)
{
return;
}
int sum[3] = {0};
for(int i =0; i < forePts.size(); i++)
{
unsigned char* p = (unsigned char*)image->imageData + forePts[i].x * 3
+ forePts[i].y*image->widthStep;
sum[0] += p[0];
sum[1] += p[1];
sum[2] += p[2];
}
avgForeColor[0] = sum[0]/forePts.size();
avgForeColor[1] = sum[1]/forePts.size();
avgForeColor[2] = sum[2]/forePts.size();
}
void setBackgroundPoints(vector<CvPoint> pts)
{
backPts.clear();
for(int i =0; i< pts.size(); i++)
{
if(!isPtInVector(pts[i],backPts))
{
backPts.push_back(pts[i]);
}
}
if(backPts.size() == 0)
{
return;
}
int sum[3] = {0};
for(int i =0; i < backPts.size(); i++)
{
unsigned char* p = (unsigned char*)image->imageData + backPts[i].x * 3 +
backPts[i].y*image->widthStep;
sum[0] += p[0];
sum[1] += p[1];
sum[2] += p[2];
}
avgBackColor[0] = sum[0]/backPts.size();
avgBackColor[1] = sum[1]/backPts.size();
avgBackColor[2] = sum[2]/backPts.size();
}
// return maxflow of graph
int runMaxflow();
// get result, a grayscale mask image indicating foreground by 255 and background by 0
IplImage* getImageMask();
private :
float colorDistance(unsigned char* color1, unsigned char* color2);
float minDistance(unsigned char* color, vector<CvPoint> points);
bool isPtInVector(CvPoint pt, vector<CvPoint> points);
void getE1(unsigned char* color,float* energy);
float getE2(unsigned char* color1,unsigned char* color2);
GraphType *graph;
};
LasySnapping::LasySnapping()
{
graph = NULL;
avgForeColor[0] = 0;
avgForeColor[1] = 0;
avgForeColor[2] = 0;
avgBackColor[0] = 0;
avgBackColor[1] = 0;
avgBackColor[2] = 0;
}
float LasySnapping::colorDistance(unsigned char* color1, unsigned char* color2)
{
return sqrt(((float)color1[0]-(float)color2[0])*((float)color1[0]-(float)color2[0])+
((float)color1[1]-(float)color2[1])*((float)color1[1]-(float)color2[1])+
((float)color1[2]-(float)color2[2])*((float)color1[2]-(float)color2[2]));
}
float LasySnapping::minDistance(unsigned char* color, vector<CvPoint> points)
{
float distance = -1;
for(int i =0 ; i < points.size(); i++)
{
unsigned char* p = (unsigned char*)image->imageData + points[i].y * image->widthStep +
points[i].x * image->nChannels;
float d = colorDistance(p,color);
if(distance < 0 )
{
distance = d;
}
else
{
if(distance > d)
{
distance = d;
}
}
}
return distance;
}
bool LasySnapping::isPtInVector(CvPoint pt, vector<CvPoint> points)
{
for(int i =0 ; i < points.size(); i++)
{
if(pt.x == points[i].x && pt.y == points[i].y)
{
return true;
}
}
return false;
}
void LasySnapping::getE1(unsigned char* color,float* energy)
{
// average distance
float df = colorDistance(color,avgForeColor);
float db = colorDistance(color,avgBackColor);
// min distance from background points and foreground points
// float df = minDistance(color,forePts);
// float db = minDistance(color,backPts);
energy[0] = df/(db+df);
energy[1] = db/(db+df);
}
float LasySnapping::getE2(unsigned char* color1,unsigned char* color2)
{
const float EPSILON = 0.01;
float lambda = 100;
return lambda/(EPSILON+
(color1[0]-color2[0])*(color1[0]-color2[0])+
(color1[1]-color2[1])*(color1[1]-color2[1])+
(color1[2]-color2[2])*(color1[2]-color2[2]));
}
int LasySnapping::runMaxflow()
{
const float INFINNITE_MAX = 1e10;
int indexPt = 0;
for(int h = 0; h < image->height; h ++)
{
unsigned char* p = (unsigned char*)image->imageData + h *image->widthStep;
for(int w = 0; w < image->width; w ++)
{
// calculate energy E1
float e1[2]={0};
if(isPtInVector(cvPoint(w,h),forePts))
{
e1[0] =0;
e1[1] = INFINNITE_MAX;
}
else if (isPtInVector(cvPoint(w,h),backPts))
{
e1[0] = INFINNITE_MAX;
e1[1] = 0;
}
else
{
getE1(p,e1);
}
// add node
graph->add_node();
graph->add_tweights(indexPt, e1[0],e1[1]);
// add edge, 4-connect
if(h > 0 && w > 0)
{
float e2 = getE2(p,p-3);
graph->add_edge(indexPt,indexPt-1,e2,e2);
e2 = getE2(p,p-image->widthStep);
graph->add_edge(indexPt,indexPt-image->width,e2,e2);
}
p+= 3;
indexPt ++;
}
}
return graph->maxflow();
}
IplImage* LasySnapping::getImageMask()
{
IplImage* gray = cvCreateImage(cvGetSize(image),8,1);
int indexPt =0;
for(int h =0; h < image->height; h++)
{
unsigned char* p = (unsigned char*)gray->imageData + h*gray->widthStep;
for(int w = 0; w < gray->width; w++)
{
if (graph->what_segment(indexPt) == GraphType::SOURCE)
{
*p = 0;
}
else
{
*p = 255;
}
p++;
indexPt ++;
}
}
return gray;
}
// global
vector<CvPoint> forePts;
vector<CvPoint> backPts;
int currentMode = 0;// indicate foreground or background, foreground as default
CvScalar paintColor[2] = {CV_RGB(0,0,255),CV_RGB(255,0,0)};
IplImage* image = NULL;
char* winName = "lazySnapping";
IplImage* imageDraw = NULL;
const int SCALE = 4;
void on_mouse( int event, int x, int y, int flags, void* )
{
if( event == CV_EVENT_LBUTTONUP )
{
if(backPts.size() == 0 && forePts.size() == 0)
{
return;
}
LasySnapping ls;
IplImage* imageLS = cvCreateImage(cvSize(image->width/SCALE,image->height/SCALE),
8,3);
cvResize(image,imageLS);
ls.setImage(imageLS);
ls.setBackgroundPoints(backPts);
ls.setForegroundPoints(forePts);
ls.runMaxflow();
IplImage* mask = ls.getImageMask();
IplImage* gray = cvCreateImage(cvGetSize(image),8,1);
cvResize(mask,gray);
// edge
cvCanny(gray,gray,50,150,3);
IplImage* showImg = cvCloneImage(imageDraw);
for(int h =0; h < image->height; h ++)
{
unsigned char* pgray = (unsigned char*)gray->imageData + gray->widthStep*h;
unsigned char* pimage = (unsigned char*)showImg->imageData + showImg->widthStep*h;
for(int width =0; width < image->width; width++)
{
if(*pgray++ != 0 )
{
pimage[0] = 0;
pimage[1] = 255;
pimage[2] = 0;
}
pimage+=3;
}
}
cvSaveImage("t.bmp",showImg);
cvShowImage(winName,showImg);
cvReleaseImage(&imageLS);
cvReleaseImage(&mask);
cvReleaseImage(&showImg);
cvReleaseImage(&gray);
}
else if( event == CV_EVENT_LBUTTONDOWN )
{
}
else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON))
{
CvPoint pt = cvPoint(x,y);
if(currentMode == 0)
{//foreground
forePts.push_back(cvPoint(x/SCALE,y/SCALE));
}
else
{//background
backPts.push_back(cvPoint(x/SCALE,y/SCALE));
}
cvCircle(imageDraw,pt,2,paintColor[currentMode]);
cvShowImage(winName,imageDraw);
}
}
int main(int argc, char** argv)
{
//if(argc != 2)
//{
//    cout<<"command : lazysnapping inputImage"<<endl;
//    return 0;
//}
string image_name;
cout<<"input image name: "<<endl;
cin>>image_name;
cvNamedWindow(winName,1);
cvSetMouseCallback( winName, on_mouse, 0);
image = cvLoadImage(image_name.c_str(),CV_LOAD_IMAGE_COLOR);
imageDraw = cvCloneImage(image);
cvShowImage(winName, image);
for(;;)
{
int c = cvWaitKey(0);
c = (char)c;
if(c == 27)
{//exit
break;
}
else if(c == 'r')
{//reset
image = cvLoadImage(image_name.c_str(),CV_LOAD_IMAGE_COLOR);
imageDraw = cvCloneImage(image);
forePts.clear();
backPts.clear();
currentMode = 0;
cvShowImage(winName, image);
}
else if(c == 'b')
{//change to background selection
currentMode = 1;
}else if(c == 'f')
{//change to foreground selection
currentMode = 0;
}
}
cvReleaseImage(&image);
cvReleaseImage(&imageDraw);
return 0;
}
#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include <iostream>
#include "CvxText.h"
#pragma comment(lib,"freetype255d.lib")
#pragma comment(lib,"opencv_core2410d.lib")
#pragma comment(lib,"opencv_highgui2410d.lib")
#pragma comment(lib,"opencv_imgproc2410d.lib")
using namespace std;
using namespace cv;
#define ROW_BLOCK 2
#define COLUMN_Block 2
writePng.cpp : Defines the entry point for the console application.
int run_test_png(Mat &mat,string image_name)
{
/*save the image with user-specified parameters*/
//Mat mat(480, 640, CV_8UC4);
//createAlphaMat(mat);
vector<int> compression_params;
compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
compression_params.push_back(9); //for PNG the default compression level is 3
try
{
imwrite(image_name, mat, compression_params);
}
catch (runtime_error& ex)
{
fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
return 1;
}
fprintf(stdout, "Saved PNG file with alpha data.\n");
waitKey(0);
return 0;
}
int coloured(Mat &template_src, Mat &mat_png, CvScalar color)
{
for (int i = 0; i < template_src.rows; ++i)
{
for (int j = 0; j < template_src.cols; ++j)
{
Vec4b& bgra = mat_png.at<Vec4b>(i, j);
//int temp = template_src.at<uchar>(i,j);
if (template_src.at<uchar>(i,j) == 0)
{
bgra[0] = color.val[0]; //B channel
bgra[1] = color.val[1]; //G channel
bgra[2] = color.val[2]; //R channel
bgra[3] = 255; //alpha channel: 0 is fully transparent, 255 is opaque
}
else
{
bgra[3] = 0; //alpha channel: 0 is fully transparent, 255 is opaque
}
}
}
return 0;
}
void ImageBinarization(IplImage *src)
{ /*binarize a grayscale image with an adaptive threshold*/
int i,j,width,height,step,chanel,threshold;
/*size is the number of pixels, avg the histogram mean, va the variance*/
float size,avg,va,maxVa,p,a,s;
unsigned char *dataSrc;
float histogram[256];
width = src->width;
height = src->height;
dataSrc = (unsigned char *)src->imageData;
step = src->widthStep/sizeof(char);
chanel = src->nChannels;
/*compute the histogram and normalize it*/
for(i=0; i<256; i++)
histogram[i] = 0;
for(i=0; i<height; i++)
for(j=0; j<width*chanel; j++)
histogram[dataSrc[i*step+j]]++;
size = (float)(width*height);
for(i=0; i<256; i++)
histogram[i] /= size;
/*mean gray level*/
avg = 0;
for(i=0; i<256; i++)
avg += i*histogram[i];
/*search the threshold that maximizes the between-class variance*/
threshold = 0;
maxVa = 0;
p = a = 0;
for(i=0; i<256; i++)
{
p += histogram[i]; /*cumulative probability of the lower class*/
a += i*histogram[i]; /*cumulative mean of the lower class*/
va = avg*p - a;
s = va*va/(p*(1-p) + 1e-6f); /*between-class variance*/
if(s > maxVa)
{
threshold = i;
maxVa = s;
}
}
/*binarize*/
for(i=0; i<height; i++)
for(j=0; j<width; j++)
{
if(dataSrc[i*step+j] > threshold)
dataSrc[i*step+j] = 255;
else
dataSrc[i*step+j] = 0;
}
}
Mat binaryzation(Mat &src)
{
Mat des_gray(src.size(),CV_8UC1);
cvtColor(src,des_gray,CV_BGR2GRAY);
//Mat bin_mat();
IplImage temp(des_gray);
ImageBinarization(&temp);
//threshold(des_gray,des_gray,150,255,THRESH_BINARY);
imshow("二值图像",des_gray);
return des_gray;
}
int generate_chinese(const int size_zi, const char *msg ,int number,CvScalar color)
{
//int size_zi = 50; //font size
CvSize czSize; //target image size
float p = 0.5;
CvScalar fsize;
//load the TTF font file
CvxText text("simhei.ttf");
//set the font attributes: size / blank ratio / spacing ratio / rotation angle
fsize = cvScalar(size_zi, 1, 0.1, 0);
text.setFont(NULL, &fsize, NULL, &p);
czSize.width = size_zi*number;
czSize.height = size_zi;
//create the source image
IplImage* ImageSrc = cvCreateImage(czSize,IPL_DEPTH_8U,3); //cvLoadImage(Imagename, CV_LOAD_IMAGE_UNCHANGED);
//Mat image(ImageSrc);
//createAlphaMat(image);
//ImageSrc = &image;
//IplImage temp(image);
//ImageSrc = &temp;
//draw the text onto the source image
text.putText(ImageSrc, msg, cvPoint(1, size_zi), color);
//show the source image
cvShowImage("original", ImageSrc);
string hanzi = msg;
hanzi = hanzi + ".png";
Mat chinese(ImageSrc,true);
Mat gray = binaryzation(chinese);
imwrite("chinese_gray.jpg",gray);
Mat mat_png(chinese.size(),CV_8UC4);
coloured(gray,mat_png,color);
run_test_png(mat_png,hanzi);
//
////cvSaveImage("hanzi.jpg",reDstImage);
//run_test_png(chinese,hanzi);
//wait for a key press
cvWaitKey();
return 0;
}
int main()
{
CvScalar color = CV_RGB(0,0,0);
int size = 200;
const char* msg = "你好a"; //for now keep the line of text short
int number = 3; //number of characters
generate_chinese(size,msg,number,color);
return 0;
}