Image processing with Qt + OpenCV

1) The .pro file
#-------------------------------------------------
# Project created by QtCreator T12:56:52
#-------------------------------------------------
QT += core gui
greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
TARGET = myQTDemo
TEMPLATE = app
SOURCES += main.cpp\
mainwindow.cpp
HEADERS += mainwindow.h
FORMS += mainwindow.ui
INCLUDEPATH+=d:\opencv249\include\opencv\
d:\opencv249\include\opencv2\
d:\opencv249\include
LIBS+=d:\opencv249\lib\libopencv_calib3d249.dll.a\
d:\opencv249\lib\libopencv_contrib249.dll.a\
d:\opencv249\lib\libopencv_core249.dll.a\
d:\opencv249\lib\libopencv_features2d249.dll.a\
d:\opencv249\lib\libopencv_flann249.dll.a\
d:\opencv249\lib\libopencv_gpu249.dll.a\
d:\opencv249\lib\libopencv_highgui249.dll.a\
d:\opencv249\lib\libopencv_imgproc249.dll.a\
d:\opencv249\lib\libopencv_legacy249.dll.a\
d:\opencv249\lib\libopencv_ml249.dll.a\
d:\opencv249\lib\libopencv_objdetect249.dll.a\
d:\opencv249\lib\libopencv_video249.dll.a
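If the include and library paths above are correct, a trivial way to confirm that the project actually links against OpenCV is to print the library version. This snippet is my own sanity check, not part of the original demo:

#include <opencv2/core/version.hpp>
#include <iostream>

int main()
{
    // prints e.g. "2.4.9" when the .pro paths are set up correctly
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;
    return 0;
}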
2) The header file (mainwindow.h)
#ifndef MAINWINDOW_H
#define MAINWINDOW_H

#include <QMainWindow>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

namespace Ui {
class MainWindow;
}

class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    explicit MainWindow(QWidget *parent = 0);
    ~MainWindow();

private slots:
    void on_pushButton_clicked();
    void on_pushButton_2_clicked();

private:
    Ui::MainWindow *ui;
    cv::Mat image, image2;   // the loaded image and its RGB copy
    QImage img;
};

#endif // MAINWINDOW_H
3) The source file (mainwindow.cpp)
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <QString>
#include <QFileDialog>
#include <QMessageBox>
#include <QTextCodec>
#include <opencv/cv.h>

MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);
}

MainWindow::~MainWindow()
{
    delete ui;
}

void MainWindow::on_pushButton_clicked()
{
    QString filename = QFileDialog::getOpenFileName(this, tr("Open Image"), "",
                           tr("Image File(*.bmp *.jpg *.jpeg *.png)"));
    // convert the Qt string to a locale-encoded std::string so imread can open Chinese paths
    QTextCodec *code = QTextCodec::codecForName("gb18030");
    std::string name = code->fromUnicode(filename).data();

    image = cv::imread(name);
    if (!image.data)
    {
        QMessageBox msgBox;
        msgBox.setText(tr("image data is null"));
        msgBox.exec();
        return;
    }

    // OpenCV stores pixels as BGR; QImage expects RGB
    cv::cvtColor(image, image2, CV_BGR2RGB);
    img = QImage((const unsigned char*)(image2.data), image2.cols, image2.rows,
                 image2.step, QImage::Format_RGB888);
    ui->label->clear();
    ui->label->setPixmap(QPixmap::fromImage(img));
    //ui->processPushButton->setEnabled(true);
    ui->label->resize(ui->label->pixmap()->size());
}

void MainWindow::on_pushButton_2_clicked()
{
    // cv::flip(image, image, 1);
    cv::Mat gray;
    cv::cvtColor(image, gray, CV_BGR2GRAY);
    cv::imshow("li", gray);
    // cv::cvtColor(image, image, CV_BGR2RGB);
    // img = QImage((const unsigned char*)(image.data), image.cols, image.rows,
    //              image.cols*image.channels(), QImage::Format_RGB888);
    img = QImage((const unsigned char*)(gray.data), gray.cols, gray.rows,
                 gray.step, QImage::Format_Indexed8);
    ui->label->setPixmap(QPixmap::fromImage(img));
}
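One caveat with the grayscale path above: QImage::Format_Indexed8 has no implicit gray palette on older Qt versions, so the label may show a colour-mapped image (Qt 5.5+ also offers Format_Grayscale8). A minimal sketch of a safer conversion; the helper name toQImageGray is mine, not from the original post:

#include <QImage>
#include <QVector>
#include <opencv2/core/core.hpp>

// Wrap a single-channel 8-bit cv::Mat in a QImage and attach an explicit gray palette.
static QImage toQImageGray(const cv::Mat &gray)
{
    QImage img(gray.data, gray.cols, gray.rows, static_cast<int>(gray.step),
               QImage::Format_Indexed8);
    QVector<QRgb> palette(256);
    for (int i = 0; i < 256; ++i)
        palette[i] = qRgb(i, i, i);
    img.setColorTable(palette);
    return img.copy();   // deep copy so the QImage outlives the Mat buffer
}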
Grabbing camera frames with Qt + OpenCV + videoInput

Recently I have been using Qt + OpenCV to grab images from a camera.
When the frames are displayed purely with OpenCV's built-in functions, CPU usage is rather high, and, more importantly, the image-parameter dialog that ships with the camera driver cannot be brought up directly.
After digging through quite a lot of material I found that OpenCV's capture on Windows is implemented internally with the videoInput class, but OpenCV does not expose videoInput's own methods.
After a few days of experimenting I managed to display the camera image with videoInput directly and to pop up the image-settings window provided by the camera driver.
Driver-free USB cameras apparently cannot be opened this way; since I don't need them I didn't investigate further.
Environment: OpenCV 2.4.8, Qt 5.2.1 with the VS2010 compiler.
1. Create a new Qt project
Building the UI is not the point here, so I won't go into detail.
2. Unpack videoInput and configure the OpenCV environment
The videoInput package contains both the source code and pre-built libraries.
I used the library built with VS2008.
Then add the OpenCV and videoInput paths to the Qt .pro file:
INCLUDEPATH += D:\opencv\build\include

CONFIG(release, debug|release) {
LIBS += D:\opencv\build\x86\vc10\lib\opencv_calib3d248.lib \
D:\opencv\build\x86\vc10\lib\opencv_contrib248.lib \
D:\opencv\build\x86\vc10\lib\opencv_core248.lib \
D:\opencv\build\x86\vc10\lib\opencv_features2d248.lib \
D:\opencv\build\x86\vc10\lib\opencv_flann248.lib \
D:\opencv\build\x86\vc10\lib\opencv_gpu248.lib \
D:\opencv\build\x86\vc10\lib\opencv_highgui248.lib \
D:\opencv\build\x86\vc10\lib\opencv_imgproc248.lib \
D:\opencv\build\x86\vc10\lib\opencv_legacy248.lib \
D:\opencv\build\x86\vc10\lib\opencv_ml248.lib \
D:\opencv\build\x86\vc10\lib\opencv_nonfree248.lib \
D:\opencv\build\x86\vc10\lib\opencv_objdetect248.lib \
D:\opencv\build\x86\vc10\lib\opencv_ocl248.lib \
D:\opencv\build\x86\vc10\lib\opencv_photo248.lib \
D:\opencv\build\x86\vc10\lib\opencv_stitching248.lib \
D:\opencv\build\x86\vc10\lib\opencv_superres248.lib \
D:\opencv\build\x86\vc10\lib\opencv_ts248.lib \
D:\opencv\build\x86\vc10\lib\opencv_video248.lib \
D:\opencv\build\x86\vc10\lib\opencv_videostab248.lib \
D:\opencv\build\include\opencv2\videoInput\videoInput.lib
}

CONFIG(debug, debug|release) {
LIBS += D:\opencv\build\x86\vc10\lib\opencv_calib3d248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_contrib248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_core248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_features2d248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_flann248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_gpu248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_highgui248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_imgproc248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_legacy248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_ml248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_nonfree248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_objdetect248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_ocl248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_photo248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_stitching248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_superres248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_ts248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_video248d.lib \
D:\opencv\build\x86\vc10\lib\opencv_videostab248d.lib \
D:\opencv\build\include\opencv2\videoInput\videoInput.lib
}
Then add the videoInput.h header from the include directory to the project.
3. Using videoInput
I wrapped the videoInput calls I use most often in a new class and exposed a few convenience methods; calling videoInput directly works just as well:
int getTotalCameraCount();                     // number of available cameras
char * getDeviceName(int deviceID);            // driver name for a given camera index
void showSettingsWindow(int deviceID);         // pop up the driver's settings window
bool setupDevice(int deviceID, int w, int h);  // set up a capture device with a resolution
bool setupDevice(int deviceID);                // set up a capture device
bool isFrameNew(int deviceID);                 // has a new frame arrived?
void stopDevice(int deviceID);                 // stop the camera
unsigned char * getPixels(int deviceID, bool flipRedAndBlue = true, bool flipImage = false); // fetch pixel data
The full set of methods can be found in the videoInput.h header:
//turns off console messages - default is to print messages
static void setVerbose(bool _verbose);
//Functions in rough order they should be used.
static int listDevices(bool silent = false);
//needs to be called after listDevices - otherwise returns NULL
static char * getDeviceName(int deviceID);
//choose to use callback based capture - or single threaded
void setUseCallback(bool useCallback);
//call before setupDevice
//directshow will try and get the closest possible framerate to what is requested
void setIdealFramerate(int deviceID, int idealFramerate);
//some devices will stop delivering frames after a while - this method gives you the option to try and reconnect
//to a device if videoInput detects that a device has stopped delivering frames.
//you MUST CALL isFrameNew every app loop for this to have any effect
void setAutoReconnectOnFreeze(int deviceNumber, bool doReconnect, int numMissedFramesBeforeReconnect);
//Choose one of these four to setup your device
bool setupDevice(int deviceID);
bool setupDevice(int deviceID, int w, int h);
//These two are only for capture cards
//USB and Firewire cameras shouldn't specify connection
bool setupDevice(int deviceID, int connection);
bool setupDevice(int deviceID, int w, int h, int connection);
//If you need to you can set your NTSC/PAL/SECAM
//preference here. if it is available it will be used.
//see #defines above for available formats - eg VI_NTSC_M or VI_PAL_B
//should be called after setupDevice
//can be called multiple times
bool setFormat(int deviceNumber, int format);
//Tells you when a new frame has arrived - you should call this if you have specified setAutoReconnectOnFreeze to true
bool isFrameNew(int deviceID);
bool isDeviceSetup(int deviceID);
//Returns the pixels - flipRedAndBlue toggles RGB/BGR flipping - and you can flip the image too
unsigned char * getPixels(int deviceID, bool flipRedAndBlue = true, bool flipImage = false);
//Or pass in a buffer for getPixels to fill returns true if successful.
bool getPixels(int id, unsigned char * pixels, bool flipRedAndBlue = true, bool flipImage = false);
//Launches a pop up settings window
//For some reason in GLUT you have to call it twice each time.
void showSettingsWindow(int deviceID);
//Manual control over settings thanks.....
//These are experimental for now.
bool setVideoSettingFilter(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
bool setVideoSettingFilterPct(int deviceID, long Property, float pctValue, long Flags = NULL);
bool getVideoSettingFilter(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long &currentValue, long &flags, long &defaultValue);
bool setVideoSettingCamera(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
bool setVideoSettingCameraPct(int deviceID, long Property, float pctValue, long Flags = NULL);
bool getVideoSettingCamera(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long &currentValue, long &flags, long &defaultValue);
//bool setVideoSettingCam(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
//get width, height and number of pixels
int getWidth(int deviceID);
int getHeight(int deviceID);
int getSize(int deviceID);
//completely stops and frees a device
void stopDevice(int deviceID);
//as above but then sets it up with same settings
bool restartDevice(int deviceID);
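Before looking at the Qt wrapper below, here is a minimal console capture loop built only from the calls listed above. It is my own sketch, assuming videoInput.h and its library are configured as in the .pro file; device 0, the 640x480 resolution and the 100-frame limit are arbitrary choices:

#include "videoInput.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    videoInput vi;
    int numDevices = videoInput::listDevices();   // prints the available cameras
    if (numDevices == 0)
        return -1;

    const int dev = 0;
    vi.setupDevice(dev, 640, 480);

    cv::Mat frame(vi.getHeight(dev), vi.getWidth(dev), CV_8UC3);
    for (int i = 0; i < 100; ++i)                 // grab ~100 frames, then quit
    {
        if (vi.isFrameNew(dev))
        {
            // fill the Mat's buffer; flipRedAndBlue=false keeps BGR order for OpenCV
            vi.getPixels(dev, frame.data, false, true);
            cv::imshow("videoInput demo", frame);
        }
        if (cv::waitKey(30) == 27)                // Esc quits early
            break;
    }
    vi.stopDevice(dev);
    return 0;
}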
videoInput is quite simple to use.
(1) Add the names of the available cameras to a combo box:

camera = new MyCamera();
// populate the list of available cameras
int count = camera->getTotalCameraCount();
for (int i = 0; i < count; i++) {
    QString name = camera->getDeviceName(i);
    ui->comboBoxCamreaList->addItem(name);
}
(2) Pop up the camera settings dialog; this only works after the camera has been opened:

// show the camera driver's settings dialog
camera->showSettingsWindow(ui->comboBoxCamreaList->currentIndex());
(3) Draw the camera image on a QLabel from the cv::Mat passed in:

void MainWindow::display(cv::Mat &mat)
{
    QImage img;
    if (mat.channels() == 3)
    {
        cv::Mat rgb;
        cv::cvtColor(mat, rgb, CV_BGR2RGB);
        img = QImage((const uchar*)(rgb.data), rgb.cols, rgb.rows,
                     rgb.cols * rgb.channels(), QImage::Format_RGB888);
    }
    else
    {
        img = QImage((const uchar*)(mat.data), mat.cols, mat.rows,
                     mat.cols * mat.channels(), QImage::Format_Indexed8);
    }
    ui->labelImage->setPixmap(QPixmap::fromImage(img));
    ui->labelImage->resize(ui->labelImage->pixmap()->size());
    ui->labelImage->show();
}
(4) Open the selected camera and start a timer that periodically grabs frames:

// open the camera
void MainWindow::on_actionCamreaOpen_triggered()
{
    int width  = 800;
    int height = 600;
    mat = cv::Mat(height, width, CV_8UC3);           // 8-bit, 3-channel buffer for the frames
    deviceID = ui->comboBoxCamreaList->currentIndex();
    camera->setupDevice(deviceID, width, height);    // configure the camera for 800x600
    timer = new QTimer(this);
    connect(timer, SIGNAL(timeout()), this, SLOT(startLoopSlot()));
    timer->start(30);
}
(5) After the timer starts, the capture loop fetches each new frame and calls display() to refresh the QLabel:

// frame display loop
void MainWindow::startLoopSlot()
{
    if (camera->isFrameNew(deviceID))
    {
        mat.data = camera->getPixels(deviceID, flipRedAndBlue, flipImage);
        display(mat);
    }
}
(6) Closing the camera:

// close the camera
void MainWindow::on_actionCameraClose_triggered()
{
    timer->stop();
    timer = NULL;
    camera->stopDevice(deviceID);
    qDebug() << "close Camera";
}
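A note on the mat.data assignment in step (5): pointing a cv::Mat at videoInput's internal buffer works, but it bypasses OpenCV's reference counting. A safer variant, my own suggestion rather than part of the original code, wraps the returned buffer and copies it into the Mat's own storage:

// inside startLoopSlot(), assuming mat was created as CV_8UC3 with the capture size
if (camera->isFrameNew(deviceID))
{
    unsigned char *pixels = camera->getPixels(deviceID, false, true); // BGR, flipped upright
    cv::Mat frame(mat.rows, mat.cols, CV_8UC3, pixels);               // wraps the buffer, no copy
    frame.copyTo(mat);                                                // deep copy into our own Mat
    display(mat);
}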
A collection of complete OpenCV image-processing example code
Loading an image
#include "StdAfx.h"
int main()
{
    string imageName = "lena.jpg";
    // load the image
    Mat img = imread(imageName, CV_LOAD_IMAGE_COLOR);
    // if loading failed
    if (img.empty()) { cout << "Could not open or find the image!" << endl; return -1; }
    return 0;
}

Load, convert RGB to grayscale, and save
#include "StdAfx.h"
int main()
{
    char* imageName = "lena.jpg";
    Mat image = imread(imageName, 1);
    if (!image.data) { cout << "Could not open or find the image!" << endl; return -1; }
    Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);   // BGR -> gray
    imwrite("gray.jpg", gray);            // save the result
    return 0;
}
Dilation example
// load the source image
Mat image = imread("1.jpg");
// create windows
namedWindow("原图-膨胀操作");
namedWindow("效果图-膨胀操作");
// show the source image
imshow("原图-膨胀操作", image);
// build a custom kernel
Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
// dilate
Mat out;
dilate(image, out, element);
// show the result
imshow("效果图-膨胀操作", out);
waitKey(0);
Erosion example
// load the source image
Mat image = imread("1.jpg");
// create windows
namedWindow("原图-腐蚀操作");
namedWindow("效果图-腐蚀操作");
// show the source image
imshow("原图-腐蚀操作", image);
// build a custom kernel
Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
// erode
Mat out;
erode(image, out, element);
// show the result
imshow("效果图-腐蚀操作", out);
waitKey(0);
Combined dilation and erosion example
Mat g_srcImage, g_dstImage;            // source image and result image
int g_nTrackbarNumer = 0;              // 0 = erode, 1 = dilate
int g_nStructElementSize = 3;          // size of the structuring element (kernel)

void Process();                        // the erosion/dilation processing function
void on_TrackbarNumChange(int, void *);   // trackbar callback
void on_ElementSizeChange(int, void *);   // trackbar callback

int main( )
{
    // change the console text colour
    system("color5E");
    // load the source image
    g_srcImage = imread("1.jpg");
    if (!g_srcImage.data) { printf("Oh no, failed to read srcImage!\n"); return -1; }
    // show the source image
    namedWindow("原始图");
    imshow("原始图", g_srcImage);
    // run an initial erosion and show the result
    namedWindow("效果图");
    // build a custom kernel
    Mat element = getStructuringElement(MORPH_RECT,
        Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
        Point(g_nStructElementSize, g_nStructElementSize));
    erode(g_srcImage, g_dstImage, element);
    imshow("效果图", g_dstImage);
    // create the trackbars
    createTrackbar("腐蚀/膨胀", "效果图", &g_nTrackbarNumer, 1, on_TrackbarNumChange);
    createTrackbar("内核尺寸", "效果图", &g_nStructElementSize, 21, on_ElementSizeChange);
    // print some help text
    cout << endl;
    waitKey(0);
    return 0;
}

Combined dilation and erosion, example 2
#include <cv.h>
#include <highgui.h>
#include <opencv2/imgproc/imgproc.hpp>

#define TYPE_MORPH_RECT     0
#define TYPE_MORPH_CROSS    1
#define TYPE_MORPH_ELLIPSE  2
#define MAX_ELE_TYPE        2
#define MAX_ELE_SIZE        21

Mat src, erode_dst, dilate_dst;

const char *erode_wn  = "eroding demo";
const char *dilate_wn = "dilating demo";

int erode_ele_type;
int dilate_ele_type;
int erode_ele_size;
int dilate_ele_size;

static void Erosion(int, void *);
static void Dilation(int, void *);

int main(int argc, char *argv[])
{
    if (argc < 2) {
        cout << "Usage: ./eroding_and_dilating [file name]" << endl;
        return -1;
    }
    // (load the image, create the trackbars and call Erosion()/Dilation() here)
    return 0;
}

Scaled image display in Qt
#include "widget.h"
#include "ui_widget.h"

Widget::Widget(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::Widget)
{
    ui->setupUi(this);
}

Widget::~Widget()
{
    delete ui;
}
void Widget::on_openButton_clicked()
{
    QString fileName = QFileDialog::getOpenFileName(this, tr("Open Image"),
                           ".", tr("Image Files (*.png *.jpg *.bmp)"));
    qDebug() << "filenames:" << fileName;
    image = cv::imread(fileName.toLocal8Bit().data());   // load the selected file
    ui->imgfilelabel->setText(fileName);
    // here use 2 ways to make a copy
    image.copyTo(originalimg);       // make a copy
    originalimg = image.clone();     // clone the img
    qimg = Widget::Mat2QImage(image);
    display(qimg);                   // display by the label
    if (image.data)
    {
        ui->saltButton->setEnabled(true);
        ui->originalButton->setEnabled(true);
        ui->reduceButton->setEnabled(true);
    }
}
QImage Widget::Mat2QImage(const cv::Mat &mat)
{
    QImage img;
    if (mat.channels() == 3)
    {
        // cvt Mat BGR 2 QImage RGB
        cv::Mat rgb;
        cvtColor(mat, rgb, CV_BGR2RGB);
        img = QImage((const unsigned char*)(rgb.data),
                     rgb.cols, rgb.rows,
                     rgb.cols * rgb.channels(),
                     QImage::Format_RGB888).copy();   // copy() because rgb is a local temporary
    }
    else
    {
        img = QImage((const unsigned char*)(mat.data),
                     mat.cols, mat.rows,
                     mat.cols * mat.channels(),
                     QImage::Format_RGB888);
    }
    return img;
}
void Widget::display(QImage img)
{
    QImage imgScaled;
    imgScaled = img.scaled(ui->imagelabel->size(), Qt::KeepAspectRatio);
    // equivalent: imgScaled = img.QImage::scaled(ui->imagelabel->width(), ui->imagelabel->height(), Qt::KeepAspectRatio);
    ui->imagelabel->setPixmap(QPixmap::fromImage(imgScaled));
}
void Widget::on_originalButton_clicked()
{
    qimg = Widget::Mat2QImage(originalimg);
    display(qimg);
}

void Widget::on_saltButton_clicked()
{
    salt(image, 3000);
    qimg = Widget::Mat2QImage(image);
    display(qimg);
}

void Widget::on_reduceButton_clicked()
{
    colorReduce0(image, 64);
    qimg = Widget::Mat2QImage(image);
    display(qimg);
}
// add salt noise: set n randomly chosen pixels to white
void Widget::salt(cv::Mat &image, int n)
{
    for (int k = 0; k < n; k++)
    {
        int i = std::rand() % image.cols;
        int j = std::rand() % image.rows;
        if (image.channels() == 1)            // gray-level image
            image.at<uchar>(j, i) = 255;
        else if (image.channels() == 3)       // color image
        {
            image.at<cv::Vec3b>(j, i)[0] = 255;
            image.at<cv::Vec3b>(j, i)[1] = 255;
            image.at<cv::Vec3b>(j, i)[2] = 255;
        }
    }
}

// colour reduction, using .ptr and []
void Widget::colorReduce0(cv::Mat &image, int p)
{
    int nl = image.rows;                       // number of lines
    int nc = image.cols * image.channels();    // total number of elements per line
    for (int j = 0; j < nl; j++)
    {
        uchar* data = image.ptr<uchar>(j);
        for (int i = 0; i < nc; i++)
            data[i] = data[i] / p * p + p / 2; // reduce each channel to a multiple of p
    }
}

widget.h
#ifndef WIDGET_H
#define WIDGET_H

#include <QWidget>
#include <QImage>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

namespace Ui {
class Widget;
}

class Widget : public QWidget
{
    Q_OBJECT
public:
    explicit Widget(QWidget *parent = 0);
    ~Widget();
private slots:
    void on_openButton_clicked();
    void on_saltButton_clicked();
    void on_reduceButton_clicked();
    void on_originalButton_clicked();
private:
    QImage Mat2QImage(const cv::Mat &mat);
    void display(QImage image);
    void salt(cv::Mat &image, int n);
    void colorReduce0(cv::Mat &image, int p);
    Ui::Widget *ui;
    cv::Mat image;
    cv::Mat originalimg;   // store the original img
    QImage qimg;
    QImage imgScaled;
};

#endif // WIDGET_H
// using .ptr and []
void colorReduce0(cv::Mat &image, int p=64) {
    int nl= image.rows;                     // number of lines
    int nc= image.cols * image.channels();  // total number of elements per line
    for (int j=0; j<nl; j++) {
        uchar* data= image.ptr<uchar>(j);
        for (int i=0; i<nc; i++) {
            // process each pixel ---------------------
            data[i]= data[i]/p*p + p/2;
            // end of pixel processing ----------------
        }
    }
}

// using .ptr and [] with a bitwise mask
void colorReduce1(cv::Mat &image, int p=64) {
    int nl= image.rows;
    int nc= image.cols * image.channels();
    // p must be a power of 2
    int n= static_cast<int>(log(static_cast<double>(p))/log(2.0));
    // mask used to round the pixel value
    uchar mask= 0xFF<<n;                    // e.g. for p=16, mask= 0xF0
    for (int j=0; j<nl; j++) {
        uchar* data= image.ptr<uchar>(j);
        for (int i=0; i<nc; i++) {
            data[i]= (data[i]&mask) + p/2;
        }
    }
}

// direct pointer arithmetic over the buffer, using the effective width (step)
void colorReduce2(cv::Mat &image, int p=64) {
    int nl= image.rows;
    int nc= image.cols * image.channels();
    int n= static_cast<int>(log(static_cast<double>(p))/log(2.0));
    int step= image.step;                   // effective width
    // mask used to round the pixel value
    uchar mask= 0xFF<<n;
    uchar *data= image.data;                // pointer to the image buffer
    for (int j=0; j<nl; j++) {
        for (int i=0; i<nc; i++) {
            *(data+i)= (*(data+i)&mask) + p/2;
        }
        data+= step;                        // next line
    }
}

// using Mat_ iterator
void colorReduce8(cv::Mat &image, int p=64) {
    // get iterators
    cv::Mat_<cv::Vec3b>::iterator it= image.begin<cv::Vec3b>();
    cv::Mat_<cv::Vec3b>::iterator itend= image.end<cv::Vec3b>();
    for ( ; it!= itend; ++it) {
        // process each pixel ---------------------
        (*it)[0]= (*it)[0]/p*p + p/2;
        (*it)[1]= (*it)[1]/p*p + p/2;
        (*it)[2]= (*it)[2]/p*p + p/2;
        // end of pixel processing ----------------
    }
}

// using Mat_ iterator and bitwise
void colorReduce9(cv::Mat &image, int p=64) {
    // p must be a power of 2
    int n= static_cast<int>(log(static_cast<double>(p))/log(2.0));
    // mask used to round the pixel value
    uchar mask= 0xFF<<n;
    // get iterators
    cv::Mat_<cv::Vec3b>::iterator it= image.begin<cv::Vec3b>();
    cv::Mat_<cv::Vec3b>::iterator itend= image.end<cv::Vec3b>();
    // scan all pixels
    for ( ; it!= itend; ++it) {
        // process each pixel ---------------------
        (*it)[0]= ((*it)[0]&mask) + p/2;
        (*it)[1]= ((*it)[1]&mask) + p/2;
        (*it)[2]= ((*it)[2]&mask) + p/2;
        // end of pixel processing ----------------
    }
}

// using MatIterator_
void colorReduce10(cv::Mat &image, int p=64) {
    // get iterators
    cv::Mat_<cv::Vec3b> cimage= image;
    cv::Mat_<cv::Vec3b>::iterator it= cimage.begin();
    cv::Mat_<cv::Vec3b>::iterator itend= cimage.end();
    for ( ; it!= itend; it++) {
        // process each pixel ---------------------
        (*it)[0]= (*it)[0]/p*p + p/2;
        (*it)[1]= (*it)[1]/p*p + p/2;
        (*it)[2]= (*it)[2]/p*p + p/2;
        // end of pixel processing ----------------
    }
}

// using the at method
void colorReduce11(cv::Mat &image, int p=64) {
    int nl= image.rows;   // number of lines
    int nc= image.cols;   // number of columns
    for (int j=0; j<nl; j++) {
        for (int i=0; i<nc; i++) {
            // process each pixel ---------------------
            image.at<cv::Vec3b>(j,i)[0]= image.at<cv::Vec3b>(j,i)[0]/p*p + p/2;
            image.at<cv::Vec3b>(j,i)[1]= image.at<cv::Vec3b>(j,i)[1]/p*p + p/2;
            image.at<cv::Vec3b>(j,i)[2]= image.at<cv::Vec3b>(j,i)[2]/p*p + p/2;
            // end of pixel processing ----------------
        }
    } // end of line
}

// with input/output images
void colorReduce12(const cv::Mat &image,  // input image
                   cv::Mat &result,       // output image
                   int p=64) {
    int nl= image.rows;   // number of lines
    int nc= image.cols;   // number of columns
    // allocate output image if necessary
    result.create(image.rows, image.cols, image.type());
    // created images have no padded pixels: treat the data as one 1D array
    nc= nc*nl;
    nl= 1;
    int n= static_cast<int>(log(static_cast<double>(p))/log(2.0));
    // mask used to round the pixel value
    uchar mask= 0xFF<<n;
    for (int j=0; j<nl; j++) {
        uchar* data= result.ptr<uchar>(j);
        const uchar* idata= image.ptr<uchar>(j);
        for (int i=0; i<nc*image.channels(); i++) {
            *data++= (*idata++ & mask) + p/2;
        }
    }
}

Image sharpening, version 1
sharp.h
#pragma once
namespace ggicci
{
    void sharpen(const Mat& img, Mat& result);
}

sharp.cpp
#include "sharp.h"

void ggicci::sharpen(const Mat& img, Mat& result)
{
    result.create(img.size(), img.type());
    // process the pixels inside the border; the outermost pixels are handled separately below
    for (int row = 1; row < img.rows - 1; row++)
    {
        // previous row
        const uchar* previous = img.ptr<uchar>(row - 1);
        // current row being processed
        const uchar* current = img.ptr<uchar>(row);
        // next row
        const uchar* next = img.ptr<uchar>(row + 1);
        uchar *output = result.ptr<uchar>(row);
        int ch = img.channels();
        int starts = ch;
        int ends = (img.cols - 1) * ch;
        for (int col = starts; col < ends; col++)
        {
            // the output pointer advances in step with the current row; each channel of every
            // pixel counts as one increment, which is why the channel count matters here
            *output++ = saturate_cast<uchar>(5 * current[col] - current[col - ch]
                            - current[col + ch] - previous[col] - next[col]);
        }
    } // end loop
    // handle the border: set the outermost pixels to 0
    result.row(0).setTo(Scalar::all(0));
    result.row(result.rows - 1).setTo(Scalar::all(0));
    result.col(0).setTo(Scalar::all(0));
    result.col(result.cols - 1).setTo(Scalar::all(0));
}
#pragma comment(lib, "opencv_core231d.lib")
#pragma comment(lib, "opencv_highgui231d.lib")
#pragma comment(lib, "opencv_imgproc231d.lib")
#include "sharp.h"

int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    ggicci::sharpen(lena, sharpenedLena);
    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    cvWaitKey();
    return 0;
}
// the same sharpening expressed as a 3x3 kernel and cv::filter2D
int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);
    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    cvWaitKey();
    return 0;
}
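Both versions apply the same 3x3 kernel, so their outputs can be compared directly away from the one-pixel border that ggicci::sharpen() zeroes out. A quick check, assuming resultLoop and resultKernel hold the two outputs (those names are mine):

// count differing values away from the border
cv::Rect inner(1, 1, lena.cols - 2, lena.rows - 2);
cv::Mat diff;
cv::absdiff(resultLoop(inner), resultKernel(inner), diff);
std::cout << "differing values: " << cv::countNonZero(diff.reshape(1)) << std::endl;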
Computing the histogram of a grayscale image

int main()
{
    Mat img = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat* arrays = &img;
    int narrays = 1;
    int channels[] = { 0 };
    InputArray mask = noArray();
    Mat hist;
    int dims = 1;
    int histSize[] = { 256 };
    float hranges[] = { 0.0, 255.0 };
    const float *ranges[] = { hranges };
    // call calcHist; the result is stored in hist
    calcHist(arrays, narrays, channels, mask, hist, dims, histSize, ranges);
    // a small helper of mine that renders the histogram data into an image;
    // parameters: the histogram data and the desired image size
    Mat histImg = ggicci::getHistogram1DImage(hist, Size(600, 420));
    imshow("lena gray image histogram", histImg);
    waitKey();
    return 0;
}
Mat ggicci::getHistogram1DImage(const Mat& hist, Size imgSize)
{
    Mat histImg(imgSize, CV_8UC3);
    int Padding = 10;
    int W = imgSize.width - 2 * Padding;
    int H = imgSize.height - 2 * Padding;
    double _max;
    minMaxLoc(hist, NULL, &_max);
    double Per = (double)H / _max;
    const Point Orig(Padding, imgSize.height - Padding);
    int bin = W / (hist.rows + 2);
    for (int i = 1; i <= hist.rows; i++)
    {
        Point pBottom(Orig.x + i * bin, Orig.y);
        Point pTop(pBottom.x, pBottom.y - Per * hist.at<float>(i - 1));
        line(histImg, pBottom, pTop, Scalar(255, 0, 0), bin);
    }
    // three red lines marking the histogram area
    line(histImg, Point(Orig.x + bin, Orig.y - H), Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
    line(histImg, Point(Orig.x + bin, Orig.y), Point(Orig.x + bin, Orig.y - H), Scalar(0, 0, 255), 1);
    line(histImg, Point(Orig.x + hist.rows * bin, Orig.y), Point(Orig.x + hist.rows * bin, Orig.y - H), Scalar(0, 0, 255), 1);
    drawArrow(histImg, Orig, Orig + Point(W, 0), 10, 30, Scalar::all(0), 2);
    drawArrow(histImg, Orig, Orig - Point(0, H), 10, 30, Scalar::all(0), 2);
    return histImg;
}
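drawArrow() is called above but its definition does not appear in the post. A minimal sketch of a compatible helper, with the signature inferred from the call sites (the implementation details are my own):

#include <cmath>

// draw a line from pStart to pEnd, finished with a simple two-segment arrow head
void drawArrow(cv::Mat& img, cv::Point pStart, cv::Point pEnd,
               int len, int alpha, cv::Scalar color, int thickness = 1)
{
    cv::line(img, pStart, pEnd, color, thickness);
    const double angle = std::atan2((double)(pStart.y - pEnd.y),
                                    (double)(pStart.x - pEnd.x));
    cv::Point arrow;
    arrow.x = pEnd.x + (int)(len * std::cos(angle + CV_PI * alpha / 180.0));
    arrow.y = pEnd.y + (int)(len * std::sin(angle + CV_PI * alpha / 180.0));
    cv::line(img, pEnd, arrow, color, thickness);
    arrow.x = pEnd.x + (int)(len * std::cos(angle - CV_PI * alpha / 180.0));
    arrow.y = pEnd.y + (int)(len * std::sin(angle - CV_PI * alpha / 180.0));
    cv::line(img, pEnd, arrow, color, thickness);
}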
Image scaling: nearest-neighbour and bilinear interpolation

#include "stdafx.h"

int main(int argc, char ** argv)
{
    IplImage *scr = 0;
    IplImage *dst = 0;
    double scale = 4;
    CvSize dst_cvsize;
    if (argc == 2 && (scr = cvLoadImage(argv[1], -1)) != 0)
    {
        dst_cvsize.width  = (int)(scr->width * scale);
        dst_cvsize.height = (int)(scr->height * scale);
        dst = cvCreateImage(dst_cvsize, scr->depth, scr->nChannels);
        cvResize(scr, dst, CV_INTER_NN);
        /*
        CV_INTER_NN     - nearest-neighbour interpolation
        CV_INTER_LINEAR - bilinear interpolation (the default)
        CV_INTER_AREA   - resampling using pixel area relation; avoids moire when shrinking
                          (when enlarging it behaves like CV_INTER_NN)
        CV_INTER_CUBIC  - bicubic interpolation
        */
        cvNamedWindow("scr", CV_WINDOW_AUTOSIZE);
        cvNamedWindow("dst", CV_WINDOW_AUTOSIZE);
        cvShowImage("scr", scr);
        cvShowImage("dst", dst);
        cvWaitKey();
        cvReleaseImage(&scr);
        cvReleaseImage(&dst);
        cvDestroyWindow("scr");
        cvDestroyWindow("dst");
    }
    return 0;
}
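The example above uses the old C API (IplImage/cvResize). The same scaling with the C++ API is shorter; a small sketch for comparison:

#include <opencv2/opencv.hpp>

int main(int argc, char** argv)
{
    if (argc != 2) return -1;
    cv::Mat src = cv::imread(argv[1], -1);
    if (src.empty()) return -1;

    cv::Mat dst;
    const double scale = 4.0;
    // INTER_NEAREST = nearest neighbour, INTER_LINEAR = bilinear (default)
    cv::resize(src, dst, cv::Size(), scale, scale, cv::INTER_NEAREST);

    cv::imshow("scr", src);
    cv::imshow("dst", dst);
    cv::waitKey();
    return 0;
}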
Applying a "nostalgic" (sepia) filter and saving the result

int main(int argc, char ** argv)
{
    // input args check
    if (argc < 3) {
        printf("please input args.\n");
        printf("e.g. : ./test infilepath outfilepath \n");
        return -1;
    }
    char * input  = argv[1];
    char * output = argv[2];
    printf("input: %s, output: %s\n", input, output);
    Mat src = imread(input, 1);
    int width = src.cols;
    int heigh = src.rows;
    Mat img(src.size(), CV_8UC3);
    for (int y = 0; y < heigh; y++)
    {
        uchar* P0 = src.ptr<uchar>(y);
        uchar* P1 = img.ptr<uchar>(y);
        for (int x = 0; x < width; x++)
        {
            float B = P0[3*x];
            float G = P0[3*x + 1];
            float R = P0[3*x + 2];
            // classic sepia coefficients
            float newB = 0.272f*R + 0.534f*G + 0.131f*B;
            float newG = 0.349f*R + 0.686f*G + 0.168f*B;
            float newR = 0.393f*R + 0.769f*G + 0.189f*B;
            if (newB < 0)   newB = 0;
            if (newB > 255) newB = 255;
            if (newG < 0)   newG = 0;
            if (newG > 255) newG = 255;
            if (newR < 0)   newR = 0;
            if (newR > 255) newR = 255;
            P1[3*x]     = (uchar)newB;
            P1[3*x + 1] = (uchar)newG;
            P1[3*x + 2] = (uchar)newR;
        }
    }
    //imshow("out", img);
    waitKey();
    imwrite(output, img);
    return 0;
}
Emboss (relief) and engraving effect
#pragma comment( lib, "cv.lib" )
#pragma comment( lib, "cxcore.lib" )
#pragma comment( lib, "highgui.lib" )

int main()
{
    IplImage *org = cvLoadImage("1.jpg", 1);
    IplImage *image = cvCloneImage(org);
    int width  = image->width;
    int height = image->height;
    int step   = image->widthStep;
    int channel = image->nChannels;
    uchar* data = (uchar *)image->imageData;
    for (int i = 1; i < width - 1; i++)
    {
        for (int j = 0; j < height; j++)
        {
            for (int k = 0; k < channel; k++)
            {
                // relief: difference of the two horizontal neighbours, offset by 128
                int temp = data[j*step + (i+1)*channel + k]
                         - data[j*step + (i-1)*channel + k] + 128;
                if (temp > 255)
                    data[j*step + i*channel + k] = 255;
                else if (temp < 0)
                    data[j*step + i*channel + k] = 0;
                else
                    data[j*step + i*channel + k] = temp;
            }
        }
    }
    cvNamedWindow("original", 1);
    cvShowImage("original", org);
    cvNamedWindow("image", 1);
    cvShowImage("image", image);
    cvWaitKey(0);
    cvDestroyAllWindows();
    cvReleaseImage(&image);
    cvReleaseImage(&org);
    return 0;
}
Image wrinkle (fold) effect
#pragma comment( lib, "cv.lib" )
#pragma comment( lib, "cxcore.lib" )
#pragma comment( lib, "highgui.lib" )

int main()
{
    IplImage *org = cvLoadImage("lena.jpg", 1);
    IplImage *image = cvCloneImage(org);
    int width  = image->width;
    int height = image->height;
    int step   = image->widthStep;
    int channel = image->nChannels;
    uchar* data = (uchar *)image->imageData;
    int sign = -1;
    for (int i = 0; i < height; i++)

GrabCut algorithm
#include "stdafx.h"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "ComputeTime.h"
#include <windows.h>
#include <iostream>
using namespace std;
using namespace cv;
static void help()
{
    cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a region\n"
            "and then grabcut will attempt to segment it out.\n"
            "Call:\n"
            "./grabcut <image_name>\n"
            "\nSelect a rectangular area around the object you want to segment\n" <<
            "\nHot keys: \n"
            "\tESC - quit the program\n"
            "\tr - restore the original image\n"
            "\tn - next iteration\n"
            "\tleft mouse button - set rectangle\n"
            "\tCTRL+left mouse button - set GC_BGD pixels\n"
            "\tSHIFT+left mouse button - set CG_FGD pixels\n"
            "\tCTRL+right mouse button - set GC_PR_BGD pixels\n"
            "\tSHIFT+right mouse button - set CG_PR_FGD pixels\n" << endl;
}
const Scalar RED = Scalar(0,0,255);
const Scalar PINK = Scalar(230,130,255);
const Scalar BLUE = Scalar(255,0,0);
const Scalar LIGHTBLUE = Scalar(255,255,160);
const Scalar GREEN = Scalar(0,255,0);
const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;    // Ctrl key
const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY;   // Shift key
static void getBinMask( const Mat& comMask, Mat& binMask )
{
    if( comMask.empty() || comMask.type()!=CV_8UC1 )
        CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
    if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols )
        binMask.create( comMask.size(), CV_8UC1 );
    binMask = comMask & 1;   // keep only the lowest bit of the mask, i.e. treat the definite
                             // and the probable foreground points as foreground
}
class GCApplication
{
public:
    enum{ NOT_SET = 0, IN_PROCESS = 1, SET = 2 };
    static const int radius = 2;
    static const int thickness = -1;

    void reset();
    void setImageAndWinName( const Mat& _image, const string& _winName );
    void showImage() const;
    void mouseClick( int event, int x, int y, int flags, void* param );
    int nextIter();
    int getIterCount() const { return iterCount; }

private:
    void setRectInMask();
    void setLblsInMask( int flags, Point p, bool isPr );

    const string* winName;
    const Mat* image;
    Mat mask;
    Mat bgdModel, fgdModel;
    uchar rectState, lblsState, prLblsState;
    bool isInitialized;
    Rect rect;
    vector<Point> fgdPxls, bgdPxls, prFgdPxls, prBgdPxls;
    int iterCount;
};
/* reset the class members */
void GCApplication::reset()
{
    if( !mask.empty() )
        mask.setTo(Scalar::all(GC_BGD));
    bgdPxls.clear(); fgdPxls.clear();
    prBgdPxls.clear();
    prFgdPxls.clear();
    isInitialized = false;
    rectState = NOT_SET;    // NOT_SET == 0
    lblsState = NOT_SET;
    prLblsState = NOT_SET;
    iterCount = 0;
}
/* store the image and the window name */
void GCApplication::setImageAndWinName( const Mat& _image, const string& _winName )
{
    if( _image.empty() || _winName.empty() )
        return;
    image = &_image;
    winName = &_winName;
    mask.create( image->size(), CV_8UC1);
    reset();
}
/* draw the 4 kinds of seed points, the rectangle and the image; many steps below call this */
void GCApplication::showImage() const
{
    if( image->empty() || winName->empty() )
        return;

    Mat res;
    Mat binMask;
    if( !isInitialized )
        image->copyTo( res );
    else
    {
        getBinMask( mask, binMask );
        image->copyTo( res, binMask );   // copy only where the lowest bit is set, i.e. keep
                                         // the image related to the (possible) foreground
    }

    vector<Point>::const_iterator it;
    /* draw the 4 kinds of seed points in different colours */
    for( it = bgdPxls.begin(); it != bgdPxls.end(); ++it )      // an iterator behaves like a pointer
        circle( res, *it, radius, BLUE, thickness );
    for( it = fgdPxls.begin(); it != fgdPxls.end(); ++it )      // definite foreground is drawn in red
        circle( res, *it, radius, RED, thickness );
    for( it = prBgdPxls.begin(); it != prBgdPxls.end(); ++it )
        circle( res, *it, radius, LIGHTBLUE, thickness );
    for( it = prFgdPxls.begin(); it != prFgdPxls.end(); ++it )
        circle( res, *it, radius, PINK, thickness );

    /* draw the rectangle */
    if( rectState == IN_PROCESS || rectState == SET )
        rectangle( res, Point( rect.x, rect.y ),
                   Point(rect.x + rect.width, rect.y + rect.height ), GREEN, 2);

    imshow( *winName, res );
}
/* after this step the mask is 3 inside the rectangle and 0 everywhere else */
void GCApplication::setRectInMask()
{
    assert( !mask.empty() );
    mask.setTo( GC_BGD );                       // GC_BGD == 0
    rect.x = max(0, rect.x);
    rect.y = max(0, rect.y);
    rect.width = min(rect.width, image->cols - rect.x);
    rect.height = min(rect.height, image->rows - rect.y);
    (mask(rect)).setTo( Scalar(GC_PR_FGD) );    // GC_PR_FGD == 3: inside the rectangle, probable foreground
}
void GCApplication::setLblsInMask( int flags, Point p, bool isPr )
{
    vector<Point> *bpxls, *fpxls;
    uchar bvalue, fvalue;
    if( !isPr )              // definite seed points
    {
        bpxls = &bgdPxls;
        fpxls = &fgdPxls;
        bvalue = GC_BGD;     // 0
        fvalue = GC_FGD;     // 1
    }
    else                     // probable seed points
    {
        bpxls = &prBgdPxls;
        fpxls = &prFgdPxls;
        bvalue = GC_PR_BGD;  // 2
        fvalue = GC_PR_FGD;  // 3
    }
    if( flags & BGD_KEY )
    {
        bpxls->push_back(p);
        circle( mask, p, radius, bvalue, thickness );   // mark this point as background (2 or 0)
    }
    if( flags & FGD_KEY )
    {
        fpxls->push_back(p);
        circle( mask, p, radius, fvalue, thickness );   // mark this point as foreground (3 or 1)
    }
}
/* mouse callback; flags is a combination of CV_EVENT_FLAG values */
void GCApplication::mouseClick( int event, int x, int y, int flags, void* )
{
    // TODO add bad args check
    switch( event )
    {
    case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels
        {
            bool isb = (flags & BGD_KEY) != 0,
                 isf = (flags & FGD_KEY) != 0;
            if( rectState == NOT_SET && !isb && !isf )   // plain left button: start the rectangle
            {
                rectState = IN_PROCESS;                  // a rectangle is being drawn
                rect = Rect( x, y, 1, 1 );
            }
            if ( (isb || isf) && rectState == SET )      // Ctrl/Shift pressed and the rectangle is done:
                lblsState = IN_PROCESS;                  // drawing definite foreground/background seeds
        }
        break;
    case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels
        {
            bool isb = (flags & BGD_KEY) != 0,
                 isf = (flags & FGD_KEY) != 0;
            if ( (isb || isf) && rectState == SET )      // drawing probable foreground/background seeds
                prLblsState = IN_PROCESS;
        }
        break;
    case CV_EVENT_LBUTTONUP:
        if( rectState == IN_PROCESS )
        {
            rect = Rect( Point(rect.x, rect.y), Point(x,y) );   // the rectangle is finished
            rectState = SET;
            setRectInMask();
            assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
            showImage();
        }
        if( lblsState == IN_PROCESS )          // seed points have been drawn
        {
            setLblsInMask(flags, Point(x,y), false);   // record the definite seeds
            lblsState = SET;
            showImage();
        }
        break;
    case CV_EVENT_RBUTTONUP:
        if( prLblsState == IN_PROCESS )
        {
            setLblsInMask(flags, Point(x,y), true);    // record the probable seeds
            prLblsState = SET;
            showImage();
        }
        break;
    case CV_EVENT_MOUSEMOVE:
        if( rectState == IN_PROCESS )
        {
            rect = Rect( Point(rect.x, rect.y), Point(x,y) );
            assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
            showImage();                       // keep refreshing the image
        }
        else if( lblsState == IN_PROCESS )
        {
            setLblsInMask(flags, Point(x,y), false);
            showImage();
        }
        else if( prLblsState == IN_PROCESS )
        {
            setLblsInMask(flags, Point(x,y), true);
            showImage();
        }
        break;
    }
}
/* run one iteration of the grabCut algorithm and return the iteration count */
int GCApplication::nextIter()
{
    if( isInitialized )
        // run one more iteration; parameter 2 is the mask: the rectangle interior minus whatever
        // has been marked as (possible) background; the mask is also the output that holds the
        // segmented foreground
        grabCut( *image, mask, rect, bgdModel, fgdModel, 1 );
    else
    {
        if( rectState != SET )
            return iterCount;
        if( lblsState == SET || prLblsState == SET )
            grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_MASK );
        else
            grabCut( *image, mask, rect, bgdModel, fgdModel, 1, GC_INIT_WITH_RECT );
        isInitialized = true;
    }
    iterCount++;

    bgdPxls.clear(); fgdPxls.clear();
    prBgdPxls.clear(); prFgdPxls.clear();

    return iterCount;
}
GCApplication gcapp;

static void on_mouse( int event, int x, int y, int flags, void* param )
{
    gcapp.mouseClick( event, x, y, flags, param );
}
int main( int argc, char** argv )
{
    cout << " Grabcut ! \n";
    cout << "input image name: ";
    string filename;
    cin >> filename;

    Mat image = imread( filename, 1 );
    if( image.empty() )
    {
        cout << "\n Durn, couldn't read image filename " << filename << endl;
        return 1;
    }

    const string winName = "image";
    cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );
    cvSetMouseCallback( winName.c_str(), on_mouse, 0 );

    gcapp.setImageAndWinName( image, winName );
    gcapp.showImage();

    ComputeTime ct;
    for(;;)
    {
        int c = cvWaitKey(0);
        switch( (char) c )
        {
        case '\x1b':
            cout << "Exiting ..." << endl;
            goto exit_main;
        case 'r':
            gcapp.reset();
            gcapp.showImage();
            break;
        case 'n':
            {
                ct.Begin();
                int iterCount = gcapp.getIterCount();
                cout << "<" << iterCount << "... ";
                int newIterCount = gcapp.nextIter();
                if( newIterCount > iterCount )
                    gcapp.showImage();
                cout << iterCount << ">" << endl;
                cout << "运行时间: " << ct.End() << endl;
            }
            break;
        }
    }

exit_main:
    cvDestroyWindow( winName.c_str() );
    return 0;
}

Lazy snapping
lazySnapping.cpp
#include "stdafx.h"
#include "graph.h"

typedef Graph<double, double, double> GraphType;   // from the maxflow library (template arguments assumed)

class LasySnapping
{
public:
    LasySnapping();
    ~LasySnapping();

private:
    vector<CvPoint> forePts;      // foreground seed points
    vector<CvPoint> backPts;      // background seed points
    // average color of foreground points
    unsigned char avgForeColor[3];
    // average color of background points
    unsigned char avgBackColor[3];
    IplImage* image;
    GraphType* graph;

public:
    void setImage(IplImage* image)
    {
        this->image = image;
        graph = new GraphType(image->width*image->height, image->width*image->height*2);
    }
    // include-pen locus
    void setForegroundPoints(vector<CvPoint> pts)
    {
        forePts.clear();
        for (unsigned int i = 0; i < pts.size(); i++)
        {
            if (!isPtInVector(pts[i], forePts))
                forePts.push_back(pts[i]);
        }
        if (forePts.size() == 0)
            return;
        int sum[3] = {0};
        for (unsigned int i = 0; i < forePts.size(); i++)
        {
            unsigned char* p = (unsigned char*)image->imageData + forePts[i].x * 3
                             + forePts[i].y * image->widthStep;
            sum[0] += p[0];
            sum[1] += p[1];
            sum[2] += p[2];
        }
        cout << sum[0] << endl;
        avgForeColor[0] = sum[0] / forePts.size();
        avgForeColor[1] = sum[1] / forePts.size();
        avgForeColor[2] = sum[2] / forePts.size();
    }
    // setBackgroundPoints(), runMaxflow() and getImageMask() are defined along the same lines
};

// mouse callback: when the left button is released, run the segmentation on a down-scaled copy
void on_mouse( int event, int x, int y, int flags, void* )
{
    if( event == CV_EVENT_LBUTTONUP )
    {
        IplImage* imageLS = cvCreateImage(cvSize(image->width/SCALE, image->height/SCALE), 8, 3);
cvResize(image,imageLS);
ls.setImage(imageLS);
ls.setBackgroundPoints(backPts);
ls.setForegroundPoints(forePts);
ls.runMaxflow();
IplImage* mask = ls.getImageMask();
IplImage* gray = cvCreateImage(cvGetSize(image),8,1);
cvResize(mask,gray);
cvCanny(gray,gray,50,150,3);
IplImage* showImg = cvCloneImage(imageDraw);
        for (int h = 0; h < image->height; h++)
        {
            unsigned char* pgray  = (unsigned char*)gray->imageData + gray->widthStep*h;
            unsigned char* pimage = (unsigned char*)showImg->imageData + showImg->widthStep*h;
            for (int width = 0; width < image->width; width++)
            {
                if (*pgray++ != 0)
                {
                    // draw the segmentation boundary in green
                    pimage[0] = 0;
                    pimage[1] = 255;
                    pimage[2] = 0;
                }
                pimage += 3;
            }
        }
        cvSaveImage("t.bmp", showImg);
        cvShowImage(winName, showImg);
        cvReleaseImage(&imageLS);
        cvReleaseImage(&mask);
        cvReleaseImage(&showImg);
        cvReleaseImage(&gray);
    }
    else if( event == CV_EVENT_LBUTTONDOWN )
    {
    }
    else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON))
    {
        CvPoint pt = cvPoint(x, y);
        if (currentMode == 0)
        {   // foreground
            forePts.push_back(cvPoint(x/SCALE, y/SCALE));
        }
        else
        {   // background
            backPts.push_back(cvPoint(x/SCALE, y/SCALE));
        }
        cvCircle(imageDraw, pt, 2, paintColor[currentMode]);
        cvShowImage(winName, imageDraw);
    }
}

int main(int argc, char** argv)
{
    //if(argc != 2)
    //{
    //    cout << "command : lazysnapping inputImage" << endl;
    //    return -1;
    //}
    // (the rest of main sets up the windows, the mouse callback and the event loop)
}

Drawing Chinese characters with CvxText (FreeType) and writing a PNG with alpha
#include "CvxText.h"
#pragma comment(lib, "freetype255d.lib")
#pragma comment(lib, "opencv_core2410d.lib")
#pragma comment(lib, "opencv_highgui2410d.lib")
#pragma comment(lib, "opencv_imgproc2410d.lib")
#define ROW_BLOCK 2
#define COLUMN_Block 2
// writePng.cpp : entry point of the console application
int run_test_png(Mat &mat, string image_name)
{
    /* save the image with our own compression parameters */
    //Mat mat(480, 640, CV_8UC4);
    //createAlphaMat(mat);
    vector<int> compression_params;
    compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
    compression_params.push_back(9);   // for PNG the default level is 3
    try
    {
        imwrite(image_name, mat, compression_params);
    }
    catch (runtime_error& ex)
    {
        fprintf(stderr, "Exception converting image to PNG format: %s\n", ex.what());
        return -1;
    }
    fprintf(stdout, "Saved PNG file with alpha data.\n");
    waitKey(0);
    return 0;
}
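createAlphaMat(), referenced in the commented-out lines above, is the helper from the OpenCV imwrite documentation sample; reproduced here for reference since it shows how a CV_8UC4 image with an alpha channel is filled:

#include <climits>

// fill a BGRA image with a colour gradient and a varying alpha channel
static void createAlphaMat(cv::Mat &mat)
{
    for (int i = 0; i < mat.rows; ++i) {
        for (int j = 0; j < mat.cols; ++j) {
            cv::Vec4b &bgra = mat.at<cv::Vec4b>(i, j);
            bgra[0] = UCHAR_MAX;                                                              // blue
            bgra[1] = cv::saturate_cast<uchar>((float)(mat.cols - j) / mat.cols * UCHAR_MAX); // green
            bgra[2] = cv::saturate_cast<uchar>((float)(mat.rows - i) / mat.rows * UCHAR_MAX); // red
            bgra[3] = cv::saturate_cast<uchar>(0.5 * (bgra[1] + bgra[2]));                    // alpha
        }
    }
}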
int coloured(Mat &template_src, Mat &mat_png, CvScalar color)
{
    for (int i = 0; i < template_src.rows; ++i)
    {
        for (int j = 0; j < template_src.cols; ++j)
        {
            Vec4b& bgra = mat_png.at<Vec4b>(i, j);
            if (template_src.at<uchar>(i, j) == 0)   // character pixel
            {
                bgra[0] = color.val[0];
                bgra[1] = color.val[1];
                bgra[2] = color.val[2];
                bgra[3] = 255;   // alpha: 255 = fully opaque
            }
            else
                bgra[3] = 0;     // alpha: 0 = fully transparent
        }
    }
    return 0;
}
void ImageBinarization(IplImage *src)
{
    /* binarize a grayscale image with an adaptive (Otsu-like) threshold */
    int i, j, width, height, step, chanel, threshold;
    /* size is the pixel count, avg the histogram mean, va the variance */
    float size, avg, va, maxVa, p, a, s;
    unsigned char *dataSrc;
    float histogram[256];

    width   = src->width;
    height  = src->height;
    dataSrc = (unsigned char *)src->imageData;
    step    = src->widthStep / sizeof(char);
    chanel  = src->nChannels;

    /* compute and normalize the histogram */
    for (i = 0; i < 256; i++)
        histogram[i] = 0;
    for (i = 0; i < height; i++)
        for (j = 0; j < width * chanel; j++)
            histogram[dataSrc[i*step + j]]++;
    size = (float)width * height;
    for (i = 0; i < 256; i++)
        histogram[i] /= size;

    /* pick the threshold that maximizes the between-class variance */
    avg = 0;
    for (i = 0; i < 256; i++)
        avg += i * histogram[i];
    threshold = 0; maxVa = 0; p = a = 0;
    for (i = 0; i < 256; i++)
    {
        p += histogram[i];
        a += i * histogram[i];
        s = avg * p - a;
        va = (p > 0 && p < 1) ? s * s / (p * (1 - p)) : 0;
        if (va > maxVa) { maxVa = va; threshold = i; }
    }

    /* binarize */
    for (i = 0; i < height; i++)
        for (j = 0; j < width * chanel; j++)
        {
            if (dataSrc[i*step + j] > threshold)
                dataSrc[i*step + j] = 255;
            else
                dataSrc[i*step + j] = 0;
        }
}
Mat binaryzation(Mat &src)
{
    Mat des_gray(src.size(), CV_8UC1);
    cvtColor(src, des_gray, CV_BGR2GRAY);
    //Mat bin_mat();
    IplImage temp(des_gray);
    ImageBinarization(&temp);
    //threshold(des_gray, des_gray, 150, 255, THRESH_BINARY);
    imshow("二值图像", des_gray);
    return des_gray;
}
int generate_chinese(const int size_zi, const char *msg, int number, CvScalar color)
{
    //int size_zi = 50;      // font size
    CvSize czSize;           // size of the target image
    float p = 0.5;
    // load the TTF font file
    CvxText text("simhei.ttf");
    // set the font attributes: size / blank ratio / spacing ratio / rotation angle
    CvScalar fsize = cvScalar(size_zi, 1, 0.1, 0);
    text.setFont(NULL, &fsize, NULL, &p);
    czSize.width  = size_zi * number;
    czSize.height = size_zi;
    // create the source image
    IplImage* ImageSrc = cvCreateImage(czSize, IPL_DEPTH_8U, 3);
    //cvLoadImage(Imagename, CV_LOAD_IMAGE_UNCHANGED);
    //Mat image(ImageSrc);
    //createAlphaMat(image);
    //ImageSrc = ?
    //IplImage temp(image);
    //ImageSrc = &temp;
    // draw the text on the source image
    text.putText(ImageSrc, msg, cvPoint(1, size_zi), color);
    // show the source image
    cvShowImage("原图", ImageSrc);
    string hanzi = msg;
    hanzi = hanzi + ".png";
    Mat chinese(ImageSrc, true);
    Mat gray = binaryzation(chinese);
    imwrite("chinese_gray.jpg", gray);
    Mat mat_png(chinese.size(), CV_8UC4);
    coloured(gray, mat_png, color);
    run_test_png(mat_png, hanzi);
    ////cvSaveImage("hanzi.jpg", reDstImage);
    //run_test_png(chinese, hanzi);
    // wait for a key press
    cvWaitKey();
    return 0;
}
int main()
{
    CvScalar color = CV_RGB(0, 0, 0);
    int size = 200;
    const char* msg = "你好a";      // keep the string to a single short line for now
    int number = 3;                  // number of characters
    generate_chinese(size, msg, number, color);
    return 0;
}