一、概述
S. Leutenegger 等人于2011年提出 Binary Robust Invariant Scalable Keypoints（BRISK）算法：使用 AGAST 算法检测角点，并在尺度空间金字塔中搜索极大值，同时用 FAST Corner score 对其进行过滤。BRISK 描述子基于识别每个特征的特征方向以实现旋转不变性。为了满足光照不变性，简单亮度测试的结果也被连接起来，描述符被构造为二进制字符串。BRISK 特征对于尺度、旋转和有限的仿射变化是不变的。
二、类参考
1、函数原型
static Ptr<BRISK> cv::BRISK::create ( int thresh = 30,
int octaves = 3,
float patternScale = 1.0f
)
static Ptr<BRISK> cv::BRISK::create ( const std::vector< float > & radiusList,
const std::vector< int > & numberList,
float dMax = 5.85f,
float dMin = 8.2f,
const std::vector< int > & indexChange = std::vector< int >()
)
static Ptr<BRISK> cv::BRISK::create ( int thresh,
int octaves,
const std::vector< float > & radiusList,
const std::vector< int > & numberList,
float dMax = 5.85f,
float dMin = 8.2f,
const std::vector< int > & indexChange = std::vector< int >()
)
2、参数详解
thresh |
AGAST 检测阈值分数。 |
octaves |
检测 octave（尺度层）的数量。设为 0 表示只做单尺度检测。
patternScale |
将此比例应用于用于对关键点的邻域进行采样的模式。 |
radiusList |
定义在关键点周围采样的半径(以像素为单位)(对于关键点比例 1)。 |
numberList |
定义采样圆上的采样点数。 必须与 radiusList 大小相同. |
dMax |
用于描述符形成的短对的阈值(以像素为单位,用于关键点尺度 1)。 |
dMin |
用于方向确定的长配对的阈值(以像素为单位,用于关键点比例 1)。 |
indexChange |
位的索引重新映射。 |
三、OpenCV源码
1、源码路径
opencv\modules\features2d\src\brisk.cpp
2、源码代码
// Factory: build a BRISK detector/extractor using the default sampling pattern.
// thresh       - AGAST detection threshold score.
// octaves      - number of detection octaves (0 = single scale).
// patternScale - scale applied to the pattern used to sample a keypoint's neighborhood.
Ptr<BRISK> BRISK::create(int thresh, int octaves, float patternScale)
{
    Ptr<BRISK_Impl> impl = makePtr<BRISK_Impl>(thresh, octaves, patternScale);
    return impl;
}
// Factory: build a BRISK extractor with a fully custom sampling pattern.
// radiusList  - sampling-circle radii around the keypoint (pixels, at keypoint scale 1).
// numberList  - number of sample points on each circle (same length as radiusList).
// dMax / dMin - distance thresholds for short pairs (descriptor) / long pairs (orientation).
// indexChange - optional remapping of the descriptor bit indices.
Ptr<BRISK> BRISK::create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
                         float dMax, float dMin, const std::vector<int>& indexChange)
{
    Ptr<BRISK_Impl> impl = makePtr<BRISK_Impl>(radiusList, numberList, dMax, dMin, indexChange);
    return impl;
}
// Factory: custom sampling pattern combined with explicit detector settings.
// thresh / octaves - AGAST detection threshold and number of detection octaves.
// Remaining parameters are as in the pattern-only overload above.
Ptr<BRISK> BRISK::create(int thresh, int octaves, const std::vector<float> &radiusList,
                         const std::vector<int> &numberList, float dMax, float dMin,
                         const std::vector<int>& indexChange)
{
    Ptr<BRISK_Impl> impl = makePtr<BRISK_Impl>(thresh, octaves, radiusList, numberList,
                                               dMax, dMin, indexChange);
    return impl;
}
// Computes, per keypoint, the characteristic orientation (from long-distance
// point pairs) and/or the binary BRISK descriptor (from short-distance pair
// brightness comparisons), controlled by doOrientation / doDescriptors.
// Keypoints whose sampling pattern would fall outside the image are erased
// from `keypoints` as a side effect.
void
BRISK_Impl::computeDescriptorsAndOrOrientation(InputArray _image, InputArray _mask, std::vector<KeyPoint>& keypoints,
OutputArray _descriptors, bool doDescriptors, bool doOrientation,
bool useProvidedKeypoints) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
// BRISK operates on grayscale; any non-8UC1 input is assumed BGR and converted.
if( image.type() != CV_8UC1 )
cvtColor(image, image, COLOR_BGR2GRAY);
if (!useProvidedKeypoints)
{
// Freshly detected keypoints have no angle yet, so orientation is forced on.
doOrientation = true;
computeKeypointsNoOrientation(_image, _mask, keypoints);
}
//Remove keypoints very close to the border
size_t ksize = keypoints.size();
std::vector<int> kscales; // remember the scale per keypoint
kscales.resize(ksize);
static const float log2 = 0.693147180559945f; // ln(2), converts natural log to log2
static const float lb_scalerange = (float)(std::log(scalerange_) / (log2));
std::vector<cv::KeyPoint>::iterator beginning = keypoints.begin();
std::vector<int>::iterator beginningkscales = kscales.begin();
static const float basicSize06 = basicSize_ * 0.6f;
for (size_t k = 0; k < ksize; k++)
{
unsigned int scale;
// Map the keypoint size to a discrete pattern-scale index via a log2 ratio.
scale = std::max((int) (scales_ / lb_scalerange * (std::log(keypoints[k].size / (basicSize06)) / log2) + 0.5), 0);
// saturate
if (scale >= scales_)
scale = scales_ - 1;
kscales[k] = scale;
// Pattern extent at this scale: reject keypoints whose pattern leaves the image.
const int border = sizeList_[scale];
const int border_x = image.cols - border;
const int border_y = image.rows - border;
if (RoiPredicate((float)border, (float)border, (float)border_x, (float)border_y, keypoints[k]))
{
keypoints.erase(beginning + k);
kscales.erase(beginningkscales + k);
// vector::erase invalidates iterators only at/after the erase point, so the
// cached begin() iterators need refreshing only when element 0 was removed.
if (k == 0)
{
beginning = keypoints.begin();
beginningkscales = kscales.begin();
}
ksize--;
k--; // re-examine the element that slid into slot k
}
}
// first, calculate the integral image over the whole image:
// current integral image
cv::Mat _integral; // the integral image
cv::integral(image, _integral);
// Scratch buffer holding the smoothed gray value at every pattern point.
// NOTE(review): raw new[]/delete[] — leaks if anything below throws; a
// std::vector or cv::AutoBuffer would be safer. Left unchanged here.
int* _values = new int[points_]; // for temporary use
// resize the descriptors:
cv::Mat descriptors;
if (doDescriptors)
{
_descriptors.create((int)ksize, strings_, CV_8U);
descriptors = _descriptors.getMat();
descriptors.setTo(0); // bits default to 0; only the 1-bits are written below
}
// now do the extraction for all keypoints:
// temporary variables containing gray values at sample points:
int t1;
int t2;
// the feature orientation
// Row cursor into the descriptor matrix, advanced by strings_ bytes per keypoint.
// (Unused when doDescriptors is false; descriptors is then an empty Mat.)
const uchar* ptr = descriptors.ptr();
for (size_t k = 0; k < ksize; k++)
{
cv::KeyPoint& kp = keypoints[k];
const int& scale = kscales[k];
const float& x = kp.pt.x;
const float& y = kp.pt.y;
if (doOrientation)
{
// get the gray values in the unrotated pattern
for (unsigned int i = 0; i < points_; i++)
{
_values[i] = smoothedIntensity(image, _integral, x, y, scale, 0, i);
}
int direction0 = 0;
int direction1 = 0;
// now iterate through the long pairings
// Orientation = atan2 of the summed, weighted intensity gradients over
// all long-distance point pairs.
const BriskLongPair* max = longPairs_ + noLongPairs_;
for (BriskLongPair* iter = longPairs_; iter < max; ++iter)
{
CV_Assert(iter->i < points_ && iter->j < points_);
t1 = *(_values + iter->i);
t2 = *(_values + iter->j);
const int delta_t = (t1 - t2);
// update the direction:
// weighted_dx/weighted_dy appear to be fixed-point scaled by 1024,
// hence the integer division here.
const int tmp0 = delta_t * (iter->weighted_dx) / 1024;
const int tmp1 = delta_t * (iter->weighted_dy) / 1024;
direction0 += tmp0;
direction1 += tmp1;
}
// Angle in degrees, in (-180, 180] as returned by atan2.
kp.angle = (float)(atan2((float) direction1, (float) direction0) / CV_PI * 180.0);
if (!doDescriptors)
{
// Orientation-only path: normalize to [0, 360) now. The descriptor
// path normalizes later, after theta is derived from the signed angle.
if (kp.angle < 0)
kp.angle += 360.f;
}
}
if (!doDescriptors)
continue;
int theta;
if (kp.angle==-1)
{
// don't compute the gradient direction, just assign a rotation of 0
theta = 0;
}
else
{
// Quantize the angle into one of n_rot_ discrete pattern rotations,
// wrapping into [0, n_rot_).
theta = (int) (n_rot_ * (kp.angle / (360.0)) + 0.5);
if (theta < 0)
theta += n_rot_;
if (theta >= int(n_rot_))
theta -= n_rot_;
}
if (kp.angle < 0)
kp.angle += 360.f;
// now also extract the stuff for the actual direction:
// let us compute the smoothed values
int shifter = 0; // bit position within the current 32-bit descriptor word
//unsigned int mean=0;
// get the gray values in the rotated pattern
for (unsigned int i = 0; i < points_; i++)
{
_values[i] = smoothedIntensity(image, _integral, x, y, scale, theta, i);
}
// now iterate through all the pairings
// Each short pair contributes one descriptor bit: 1 iff sample i is
// brighter than sample j in the rotated pattern.
unsigned int* ptr2 = (unsigned int*) ptr;
const BriskShortPair* max = shortPairs_ + noShortPairs_;
for (BriskShortPair* iter = shortPairs_; iter < max; ++iter)
{
CV_Assert(iter->i < points_ && iter->j < points_);
t1 = *(_values + iter->i);
t2 = *(_values + iter->j);
if (t1 > t2)
{
*ptr2 |= ((1) << shifter);
} // else already initialized with zero
// take care of the iterators:
++shifter;
if (shifter == 32)
{
shifter = 0;
++ptr2;
}
}
// advance to the next keypoint's descriptor row
ptr += strings_;
}
// clean-up
delete[] _values;
}
四、效果图像示例