以前我用 gabor 和 adaboost 做过图像识别,
现在用另一种方法试试。首先把做卷积的 gabor 核改一下加强方向性。
- ZeGabor::ZeGabor(double dPhi, double dNu, double dSigma, double dF)
-
{
-
double Sigma = dSigma;
-
double F = dF;
-
-
double Kmax = PI/2;
-
-
// Absolute value of K
-
double K = Kmax / pow(F, dNu);
-
double Phi = dPhi;
-
-
double dModSigma = Sigma/K;
-
-
double dWidth = cvRound(dModSigma*6 + 1);
-
-
if (fmod(dWidth, 2.0)==0.0) dWidth++;
-
-
Width = (long)dWidth;
-
-
Real = cvCreateMat( Width, Width, CV_32FC1);
-
Imag = cvCreateMat( Width, Width, CV_32FC1);
-
-
CvMat *mReal, *mImag;
-
mReal = cvCreateMat( Width, Width, CV_32FC1);
-
mImag = cvCreateMat( Width, Width, CV_32FC1);
-
-
double a, b;
-
double c;
-
double x, y;
-
double ra, rb;
-
double dReal;
-
double dImag;
-
double dTemp1, dTemp2, dTemp3;
-
-
a = 0.4;
-
b = 1.0;
-
c = dPhi;
-
-
-
for (int i = 0; i < Width; i++)
-
{
-
for (int j = 0; j < Width; j++)
-
{
-
x = i-(Width-1)/2;
-
y = j-(Width-1)/2;
-
-
ra = x*cos(c) + y*sin(c);
-
rb = -x*sin(c) + y*cos(c);
-
-
dTemp1 = (K*K/Sigma*Sigma)*exp(-(ra*ra/(a*a)+rb*rb/(b*b))*K*K/(2*Sigma*Sigma));
-
dTemp2 = cos(K*cos(Phi)*1.5*x + K*sin(Phi)*1.5*y) - exp(-(pow(Sigma,2)/2));
-
dTemp3 = sin(K*cos(Phi)*1.5*x + K*sin(Phi)*1.5*y);
-
-
-
dReal = dTemp1*dTemp2;
-
dImag = dTemp1*dTemp3;
-
-
cvSetReal2D((CvMat*)mReal, i, j, dReal );
-
cvSetReal2D((CvMat*)mImag, i, j, dImag );
-
}
-
}
-
/**************************** Gabor Function ****************************/
-
cvCopy(mReal, Real, NULL);
-
cvCopy(mImag, Imag, NULL);
-
//printf("A %d x %d Gabor kernel with %f PI in arc is created.\n", Width, Width, Phi/PI);
-
cvReleaseMat( &mReal );
-
cvReleaseMat( &mImag );
-
}
这张图经过放大,实际使用的卷积核比图示的小很多。用一组不同方向的
ZeGabor 核和图像作卷积,在卷积结果图中,图像边沿部分的坐标点上,
某一个或几个方向的结果会比平均值大很多;取这样的点做特征点,
以这些方向做特征值,就可以构成一组用于图像识别的特征。
这是要识别的目标图像的标准图,右图上的黄点是取得的特征点,特征点上的特征值是此点上的图像边沿方
向。
提取标准图的特征:
// --- Template feature extraction (excerpt of a larger function) ---
// Convolves the grey template with ZGDIRES oriented Gabor kernels, marks
// pixels where one direction's response dominates the per-pixel average as
// oriented-edge points, and stores sampled points (coordinates + winning
// direction) into filter_zpoints as the template's feature set.
// NOTE(review): img_src, filter_zpoints, ZGDIRES, ZGDN, PI and the
// ZPoint/ZeGabor types are declared outside this excerpt.
IplImage *img_gray = cvCreateImage( cvGetSize(img_src), 8, 1 );

cvCvtColor(img_src, img_gray, CV_BGR2GRAY);

// Per-pixel outputs: edge flag (0 or 255) and index of strongest direction.
CvMat *mat_edge = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);
CvMat *mat_dire = cvCreateMat(img_gray->height, img_gray->width, CV_32SC1);
CvMat *mat_mask = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);  // NOTE(review): never used in this excerpt

CvMat *mat_max_dire = cvCreateMat(img_gray->height, img_gray->width, CV_32SC1);  // NOTE(review): never used in this excerpt

// One Gabor response-magnitude map per direction.
CvMat *mat_mags[ZGDIRES];

for (int i=0; i<ZGDIRES; i++)
{
    mat_mags[i] = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);
}

// Build ZGDIRES kernels with orientations evenly spanning [0, PI).
ZeGabor *gabors[ZGDIRES];

double Sigma = 2*PI;
double F = sqrt(2.0);
double dn = ZGDN;

for (int n=0; n<ZGDIRES; n++)
{
    gabors[n] = new ZeGabor((PI/ZGDIRES)*n, dn, Sigma, F);
}

// Convolve the image with each kernel; conv_mat_mag writes the response
// magnitude into mat_mags[n].
for (int n=0; n<ZGDIRES; n++)
{
    gabors[n]->conv_mat_mag(img_gray, mat_mags[n]);
}

// For every pixel: find the strongest direction and the mean response over
// all directions; the pixel is an edge point when the peak exceeds 4x the
// mean (directional dominance test).
for (int y=0; y<img_gray->height; y++)
{
    for (int x=0; x<img_gray->width; x++)
    {
        float sum;
        float average;
        float max_ve;
        int dire;

        sum = 0.0;
        dire = 0;
        max_ve = CV_MAT_ELEM(*mat_mags[0], float, y, x);

        for (int n=0; n<ZGDIRES; n++)
        {
            float ve = CV_MAT_ELEM(*mat_mags[n], float, y, x);
            sum += ve;
            if (ve > max_ve)
            {
                max_ve = ve;
                dire = n;
            }
        }

        average = sum/ZGDIRES;

        CV_MAT_ELEM(*mat_edge, float, y, x) = 0;
        CV_MAT_ELEM(*mat_dire, int, y, x) = dire;

        if (max_ve > 4.0*average) CV_MAT_ELEM(*mat_edge, float, y, x) = 255;
    }
}

// Sample edge pixels on a 2-pixel grid (skipping a 5-pixel border) and keep
// each one's coordinates plus winning direction as a template feature.
ZPoint zpoint;

for (int y=5; y<img_gray->height-5; y+=2)
{
    for (int x=5; x<img_gray->width-5; x+=2)
    {
        // > 254 means "flagged 255" on the float matrix.
        if (CV_MAT_ELEM(*mat_edge, float, y, x) > 254)
        {
            zpoint.x = x;
            zpoint.y = y;

            zpoint.dire = CV_MAT_ELEM(*mat_dire, int, y, x);

            filter_zpoints.push_back(zpoint);
        }
    }
}

std::cout << "filter_zpoints.size : " << filter_zpoints.size() << std::endl;

// Release the per-direction magnitude maps.
// NOTE(review): mat_edge/mat_dire/mat_mask/mat_max_dire, img_gray and the
// new'ed gabors[] are not released in this excerpt — confirm they are freed
// later in the full function, otherwise this leaks per call.
for (int i=0; i<ZGDIRES; i++)
{
    cvReleaseMat(&mat_mags[i]);
}
检测目标图像:
// --- Target detection (excerpt of a larger function) ---
// Computes per-pixel directional Gabor responses for the whole search image,
// then slides the template's feature set (filter_zpoints) over it; a window
// position where a large enough fraction of feature points still shows a
// dominant response in their recorded direction is reported as a match
// centre in scan_points.
// NOTE(review): img_src, filter_zpoints, scan_points, ZGDIRES, ZGDN, PI,
// SCANTHRE, IMGTW, IMGTH and the ZGFeature/ScanPoint/ZeGabor types are
// declared outside this excerpt.
IplImage *img_gray = cvCreateImage( cvGetSize(img_src), 8, 1 );
cvCvtColor(img_src, img_gray, CV_BGR2GRAY);

// One feature record per pixel: all ZGDIRES responses plus their mean.
// NOTE(review): allocated with new[] and not deleted in this excerpt —
// confirm delete[] happens later in the full function.
ZGFeature *p_zg_features = new ZGFeature[img_gray->height*img_gray->width];

CvMat *mat_edge = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);
CvMat *mat_dire = cvCreateMat(img_gray->height, img_gray->width, CV_32SC1);
CvMat *mat_mask = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);  // NOTE(review): never used in this excerpt

// One Gabor response-magnitude map per direction.
CvMat *mat_mags[ZGDIRES];

for (int i=0; i<ZGDIRES; i++)
{
    mat_mags[i] = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);
}

// Same kernel bank as used for the template, so responses are comparable.
ZeGabor *gabors[ZGDIRES];

double Sigma = 2*PI;
double F = sqrt(2.0);
double dn = ZGDN;

for (int n=0; n<ZGDIRES; n++)
{
    gabors[n] = new ZeGabor((PI/ZGDIRES)*n, dn, Sigma, F);
}

for (int n=0; n<ZGDIRES; n++)
{
    gabors[n]->conv_mat_mag(img_gray, mat_mags[n]);
}

// Fill one ZGFeature per pixel: per-direction responses, their average,
// and (in mat_dire) the index of the strongest direction.
for (int y=0; y<img_gray->height; y++)
{
    for (int x=0; x<img_gray->width; x++)
    {
        float sum;
        float average;
        float max_ve;
        int dire;

        sum = 0.0;
        dire = 0;
        max_ve = CV_MAT_ELEM(*mat_mags[0], float, y, x);

        ZGFeature *p_zg_f = p_zg_features + y*img_gray->width + x;

        for (int n=0; n<ZGDIRES; n++)
        {
            float ve = CV_MAT_ELEM(*mat_mags[n], float, y, x);

            p_zg_f->dire_ves[n] = ve;

            sum += ve;
            if (ve > max_ve)
            {
                max_ve = ve;
                dire = n;
            }
        }

        average = sum/ZGDIRES;

        p_zg_f->ve_average = average;

        CV_MAT_ELEM(*mat_edge, float, y, x) = 0;
        CV_MAT_ELEM(*mat_dire, int, y, x) = dire;
    }
}

// Magnitude maps are folded into p_zg_features now; release them.
// NOTE(review): mat_edge/mat_dire/mat_mask, img_gray and the new'ed
// gabors[] are not released in this excerpt — confirm cleanup downstream.
for (int i=0; i<ZGDIRES; i++)
{
    cvReleaseMat(&mat_mags[i]);
}

int num_zpoints = filter_zpoints.size();

ScanPoint scan_point;

// Slide the template's feature set over the image on a 2-pixel grid.
// NOTE(review): the -100 margins presumably reflect the template size —
// verify against IMGTW/IMGTH in the full source.
for (int y=0; y<img_gray->height-100; y+=2)
{
    for(int x=0; x<img_gray->width-100; x+=2)
    {
        // zclass counts template feature points that "fire" at this offset.
        int zclass = 0;

        for (std::vector<ZPoint>::iterator i = filter_zpoints.begin(); i != filter_zpoints.end(); i++)
        {
            // Template feature point translated to image coordinates.
            int x1 = x + i->x;
            int y1 = y + i->y;

            ZGFeature *p_zg_f = p_zg_features + y1*img_gray->width + x1;

            int num_mask = p_zg_f->num_mask;  // NOTE(review): read but never used in this excerpt
            int dire = i->dire;

            // A feature matches when the response in the template's recorded
            // direction is at least 2x this pixel's average response.
            if(p_zg_f->dire_ves[dire] > 2.0*p_zg_f->ve_average)
            {
                zclass++;
            }
        }

        // Declare a detection when enough of the feature set matched;
        // report the window centre (template half-size offset).
        if ((float)zclass/(float)num_zpoints > SCANTHRE)
        {
            scan_point.x = x+IMGTW/2;
            scan_point.y = y+IMGTH/2;

            scan_points.push_back(scan_point);
        }
    }

    // Progress indicator (one line per scanned row).
    std::cout << y << std::endl;
}
运行截图:
检测结果,目标被蓝圈标识出来了。
这个方法需要计算一组检测图的卷积,这部分计算量比较大,但是如果使用 OpenCL 等并行计算技术应
该可以大大加速。下一步我准备处理目标的大小和方向变化,这部分可能还比较简单;然后准备处理立体的目
标,这个可能比较复杂,目前的想法是对一个立体目标的不同角度分别取特征处理,只是这样计算量会比较
大。
zhujiang
2011.02.23
----------------------------------------------------------------------------------------
这几天又做了些实验,这个算法效果还不错,对光线变化和局部遮挡也不大敏感,就是速度比较慢,T6670 2.2G CPU 单线程检测一幅 800*600 的图像,旋转范围左右大约各 20 度,缩放 1.0 到 1.5 倍,大约用几十秒。
这样如果要根据一组不同角度的照片的特征去识别一个立体目标就必须加快处理速度,算法应该还可以改进,再有就是用 OpenCL 等并行处理技术了,这个局部边沿方向特征算法应该是适合并行化的,理论上中高端显卡上的 GPU 跑并行化的算法可以提速几十到几百倍,不过我的 4570 入门级小显卡估计效果有限。 :-)
zhujiang
2011.3.3
阅读(5591) | 评论(8) | 转发(0) |