In general, the edges of an image carry most of its information, and this edge information can be used for image recognition. Convolving the image with a bank of Gabor kernels whose directional selectivity has been strengthened extracts that edge information. First the ordinary Gabor kernel is modified to sharpen its directionality; in this program the modified kernel is called a zegabor kernel. The image is convolved with a set of zegabor kernels of different orientations. In the convolution results, at points lying on image edges one or a few of the per-orientation responses are much larger than the average; such points are taken as feature points, and those orientations are taken as their feature values.
For example, with the toy cat, the red dots in the figure on the right are the extracted feature points, and the yellow segments show the direction of the local image edge at each feature point, i.e. the orientation of the zegabor kernel whose convolution response has the largest magnitude at that point.
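The post does not show the zegabor kernel itself. As a rough sketch of the filtering step, assuming ordinary (unmodified) Gabor kernels, 32 orientations (matching the CV_PI/32 factor in the code below) and illustrative parameter values, a bank of oriented kernels could be built and convolved with the grayscale image like this, using the same legacy OpenCV C API as the rest of the code:

#include <math.h>
#include <opencv/cv.h>

// Build one oriented Gabor kernel (real part) of size ksize x ksize.
// theta is the orientation; sigma, lambda, gamma and psi are the usual Gabor
// parameters; the values passed below are assumptions, not the author's.
static CvMat* make_gabor_kernel(int ksize, double theta, double sigma,
                                double lambda, double gamma, double psi)
{
    CvMat *kernel = cvCreateMat(ksize, ksize, CV_32FC1);
    int half = ksize / 2;
    for (int y = -half; y <= half; y++)
    {
        for (int x = -half; x <= half; x++)
        {
            double xr =  x*cos(theta) + y*sin(theta);
            double yr = -x*sin(theta) + y*cos(theta);
            double v = exp(-(xr*xr + gamma*gamma*yr*yr) / (2*sigma*sigma))
                     * cos(2*CV_PI*xr/lambda + psi);
            CV_MAT_ELEM(*kernel, float, y + half, x + half) = (float)v;
        }
    }
    return kernel;
}

// Convolve the gray image with kernels at 32 orientations; responses[k] then
// holds the response of orientation k at every pixel.
static void gabor_bank_responses(const CvMat *mat_gray, CvMat *responses[32])
{
    for (int k = 0; k < 32; k++)
    {
        double theta = k * CV_PI / 32.0;
        CvMat *kernel = make_gabor_kernel(15, theta, 3.0, 6.0, 0.5, 0.0);
        responses[k] = cvCreateMat(mat_gray->rows, mat_gray->cols, CV_32FC1);
        cvFilter2D(mat_gray, responses[k], kernel, cvPoint(-1, -1));
        cvReleaseMat(&kernel);
    }
}

From these per-orientation responses, each pixel's largest response magnitude and its orientation (the max_ve and max_dire used below), together with the average magnitude over all orientations (ve_average), can then be taken.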
Gabor features are directional. The main goal of this experiment is, in regions where the feature points are dense and their directions vary a lot, to gather neighboring feature points into small groups, using the direction of a point with a relatively large feature value as the small group's main direction; several neighboring small groups are then combined into a large group, and the relative positions of the small groups inside the large group are recorded. When doing image matching, similar feature groups are searched for in the image.
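Concretely, the second half of the code below stores each member of a small group by its polar offset from the group's center point and by its orientation relative to the center's main direction, so the description does not depend on the absolute direction. A minimal, purely illustrative version of that relative encoding (mirroring the xr/yr/r/a1/dire1 values computed near the end of the code) is:

#include <math.h>

#define NDIRE 32   /* number of quantized orientations; corresponds to ZGDIRES in the code below */

// Encode one neighbor (x1, y1, orientation index dire) relative to a group
// center (xz, yz, main orientation index direz): distance to the center,
// angle measured from the main direction, and orientation difference.
static void relative_record(int xz, int yz, int direz,
                            int x1, int y1, int dire,
                            float *r, float *a_rel, int *dire_rel)
{
    const float pi = 3.14159265f;
    int xr = x1 - xz;
    int yr = yz - y1;                           /* flip y: image rows grow downwards */
    *r = sqrtf((float)(xr*xr + yr*yr));         /* distance to the group center */
    float a  = atan2f((float)yr, (float)xr);    /* absolute angle of the offset */
    float az = direz * (pi / NDIRE);            /* main direction as an angle */
    *a_rel = a - az;                            /* angle relative to the main direction */
    int d = (dire - direz) % NDIRE;             /* orientation relative to the main direction */
    if (d < 0) d += NDIRE;
    *dire_rel = d;
}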
Today's image matching experiment result:
This is somewhat similar to SIFT, but it uses Gabor-based local directional features. The program is still very primitive, but the preliminary experimental results are reasonable.
To start with, here is the experimental feature-extraction code; it is very rough and will be revised later.
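The snippet below refers to several types, constants and globals that are defined elsewhere in the program and were not posted. Based only on how they are used, a guessed minimal set of declarations might look like the following; the field types and the threshold values are assumptions:

#include <vector>
#include <opencv/cv.h>

#define ZGDIRES 32          // assumed number of zegabor orientations (the code uses CV_PI/32)
#define TRNOISE 10.0f       // assumed noise threshold on the maximum response
#define TRIFTH  2.0f        // assumed ratio threshold: max response / average response

// Per-pixel result of the zegabor filtering, as consumed by the code below.
struct ZGFeature
{
    float max_ve;       // largest response magnitude over all orientations
    float ve_average;   // average response magnitude over all orientations
    int   max_dire;     // index (0..ZGDIRES-1) of the orientation with the largest response
};

// One member of a feature group, stored relative to the group's center point.
struct MatchFeature
{
    int   x, y;         // absolute position
    int   xr, yr;       // position relative to the group center
    float r, r1;        // distance to the group center
    float a, az, a1;    // absolute angle, center's main direction, relative angle
    int   dire, dire1;  // orientation index, and orientation relative to the center's
};

typedef std::vector<MatchFeature> MatchFeatureVec;

// Filled in elsewhere: the source image, one ZGFeature per pixel, and the output groups.
extern IplImage *img_src;
extern ZGFeature *local_features;
extern std::vector<MatchFeatureVec> match_feature_vvs;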
// Convert the source image to grayscale and then to a 32-bit float matrix.
IplImage *img_gray = cvCreateImage( cvGetSize(img_src), 8, 1 );
cvCvtColor(img_src, img_gray, CV_BGR2GRAY);
CvMat *mat_gray = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);
cvConvert(img_gray, mat_gray);
CvMat *mat_mask = cvCreateMat(img_gray->height, img_gray->width, CV_32SC1);
CvMat *mat_mask01 = cvCreateMat(img_gray->height, img_gray->width, CV_32SC1);
CvMat *mat_major = cvCreateMat(img_gray->height, img_gray->width, CV_32SC1);
CvMat *mat_max_ve = cvCreateMat(img_gray->height, img_gray->width, CV_32FC1);
cvZero(mat_mask);
cvZero(mat_mask01);
cvZero(mat_major);
cvZero(mat_max_ve);
// First non-maximum suppression pass: keep a point only if its maximum response
// passes the noise/ratio thresholds and is the largest in a 7x7 neighborhood.
for (int y=5; y < img_gray->height-5; y+=1)
{
for (int x=5; x < img_gray->width-5; x+=1)
{
ZGFeature *p_features = local_features + y*img_gray->width + x;
CV_MAT_ELEM(*mat_max_ve, float, y, x) = p_features->max_ve;
if (p_features->max_ve < TRNOISE || p_features->max_ve / p_features->ve_average < TRIFTH) continue;
CV_MAT_ELEM(*mat_mask, int, y, x) = 255;
for (int mx=-3; mx<=3; mx++)
{
for (int my=-3; my<=3; my++)
{
int x1 = x + mx;
int y1 = y + my;
ZGFeature *p_features01 = local_features + y1*img_gray->width + x1;
if (p_features->max_ve < p_features01->max_ve)
{
CV_MAT_ELEM(*mat_mask, int, y, x) = 0;
}
}
}
}
}
// Second pass: the same test over a larger 9x9 neighborhood, stored in mat_mask01.
for (int y=5; y < img_gray->height-5; y+=1)
{
for (int x=5; x < img_gray->width-5; x+=1)
{
ZGFeature *p_features = local_features + y*img_gray->width + x;
if (p_features->max_ve < TRNOISE || p_features->max_ve / p_features->ve_average < TRIFTH) continue;
CV_MAT_ELEM(*mat_mask01, int, y, x) = 255;
for (int mx=-4; mx<=4; mx++)
{
for (int my=-4; my<=4; my++)
{
int x1 = x + mx;
int y1 = y + my;
ZGFeature *p_features01 = local_features + y1*img_gray->width + x1;
if (p_features->max_ve < p_features01->max_ve)
{
CV_MAT_ELEM(*mat_mask01, int, y, x) = 0;
}
}
}
}
}
// Scale the responses to 0..255 so the fixed thresholds below (ve > 40, ve < 20) apply.
cvNormalize((CvMat*)mat_max_ve, (CvMat*)mat_max_ve, 0, 255, CV_MINMAX);
MatchFeatureVec major_features;
MatchFeature major_feature;
// Collect the "major" feature points: strong responses that survived the 9x9 suppression.
for (int y=20; y < img_gray->height-20; y+=1)
{
for (int x=20; x < img_gray->width-20; x+=1)
{
int num_mask;
num_mask = 0;
int ve = CV_MAT_ELEM(*mat_max_ve, float, y, x);
int mask = CV_MAT_ELEM(*mat_mask01, int, y, x);
if (ve > 40 && mask == 255)
//if (mask == 255)
{
CV_MAT_ELEM(*mat_major, int, y, x) = 255;
ZGFeature *p_features = local_features + y*img_gray->width + x;
major_feature.x = x;
major_feature.y = y;
major_feature.dire = p_features->max_dire;
major_feature.az = major_feature.dire*(CV_PI/32.0);
major_feature.a = major_feature.az;
major_features.push_back(major_feature);
}
}
}
// For each major point, gather the surviving feature points within +-30 pixels and
// store their position and orientation relative to the major point as one group.
for (MatchFeatureVec::iterator i = major_features.begin(); i != major_features.end(); i++)
{
int xz = i->x;
int yz = i->y;
float az = i->dire*(CV_PI/32.0);
int direz = i->dire;
MatchFeatureVec match_features;
match_features.clear();
match_features.push_back(*i);
for (int mx=-30; mx<=30; mx+=1)
{
for (int my=-30; my<=30; my+=1)
{
int x1 = xz + mx;
int y1 = yz + my;
if (x1 < 0 || x1 >= img_gray->width || y1 < 0 || y1 >= img_gray->height) continue;
int mask = CV_MAT_ELEM(*mat_mask, int, y1, x1);
int ve = CV_MAT_ELEM(*mat_max_ve, float, y1, x1);
if ((abs(mx) < 5 && abs(my) < 5) || mask == 0 || ve < 20) continue;
//if ((abs(mx) < 5 && abs(my) < 5) || mask == 0) continue;
ZGFeature *p_features = local_features + y1*img_gray->width + x1;
//if (p_features->max_dire == direz) continue;
MatchFeature match_feature;
int xr = x1 - xz;
int yr = yz - y1;
float r = sqrt(xr*xr + yr*yr);
float a = atan2(yr, xr);
if (xr < 0.0000001 && xr > -0.0000001)
{
if (yr > 0) a = 90/360.0*(2*CV_PI);
if (yr < 0) a = 270/360.0*(2*CV_PI);
}
match_feature.x = x1;
match_feature.y = y1;
match_feature.xr = xr;
match_feature.yr = yr;
match_feature.r = r;
match_feature.a = a;
match_feature.r1 = r;
match_feature.dire = p_features->max_dire;
match_feature.a1 = a - az;
int dire1 = p_features->max_dire - direz;
if (dire1 < 0) dire1 = dire1 + ZGDIRES;
dire1 = dire1%ZGDIRES;
match_feature.dire1 = dire1;
if (dire1) match_features.push_back(match_feature);
}
}
if (match_features.size() >= 5) match_feature_vvs.push_back(match_features);
//if (match_features.size() > 6 && match_features.size() < 10) match_feature_vvs.push_back(match_features);
}
// Debug visualization: show which points survived the first suppression pass.
IplImage *img_debug = cvCreateImage( cvGetSize(img_gray), 8, 1 );
cvZero(img_debug);
cvConvert(mat_mask, img_debug);
//cvConvert(mat_mask01, img_debug);
//cvConvert(mat_major, img_debug);
cvNamedWindow("mat_mask", 1);
cvShowImage("mat_mask", img_debug);
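The matching code itself is not included in this post. As a rough, hypothetical illustration of what searching for similar feature groups could look like, the function below scores two groups by counting members that agree in relative orientation and roughly in distance to the group center; the tolerance values are arbitrary assumptions:

#include <math.h>
#include <stdlib.h>

// Hypothetical similarity score between two feature groups: for every member
// of group a, look for a member of group b with nearly the same relative
// orientation (dire1) and a similar distance to the group center.
static int group_similarity(const MatchFeatureVec &a, const MatchFeatureVec &b)
{
    int matched = 0;
    for (size_t i = 0; i < a.size(); i++)
    {
        for (size_t j = 0; j < b.size(); j++)
        {
            int ddire = abs(a[i].dire1 - b[j].dire1) % ZGDIRES;  // relative-orientation difference
            if (ddire > ZGDIRES/2) ddire = ZGDIRES - ddire;      // directions wrap around
            if (ddire <= 1 && fabs(a[i].r - b[j].r) < 3.0f)      // assumed tolerances
            {
                matched++;
                break;
            }
        }
    }
    return matched;  // higher means the two groups are more alike
}

Two images would then be compared by building groups (match_feature_vvs) for both and, for each group in one image, keeping the best-scoring group in the other.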