Fast Bilateral Filter (with Complete C Code)
Quite a while ago I wrote "A Simple Implementation of the Bilateral Filter (bilateralFilter)".
The code I studied at the time came from the CUDA samples.
The relevant code can be found here:
https://github.com/johng12/cudaSamples/tree/master/cudaSamples/3_Imaging/bilateralFilter
The algorithm's logic is quite straightforward, so I won't go over it in detail.
If you need a refresher, see "O(1) Bilateral Filtering: Principle, Workflow, Implementation and Results".
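As a quick reminder (this is the textbook definition, not anything specific to the code below), the brute-force bilateral filter replaces each pixel by a weighted average of its neighbors, where each weight is the product of a spatial Gaussian and a range (color) Gaussian:
$$BF[I]_p = \frac{1}{W_p}\sum_{q \in S} G_{\sigma_s}(\lVert p-q \rVert)\, G_{\sigma_r}(\lvert I_p - I_q \rvert)\, I_q, \qquad W_p = \sum_{q \in S} G_{\sigma_s}(\lVert p-q \rVert)\, G_{\sigma_r}(\lvert I_p - I_q \rvert)$$
In the reference implementation below, gaussian[] holds the precomputed spatial term, heuclideanLen() is the range term, and sum is the normalization factor W_p.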
The code is in the file bilateralFilter_cpu.cpp:
#include <math.h>
#include <string.h>
////////////////////////////////////////////////////////////////////////////////
// export C interface
#define EPSILON 1e-3
extern "C" void updateGaussianGold(float delta, int radius);
extern "C" void bilateralFilterGold(unsigned int *pSrc,
unsigned int *pDest,
float e_d,
int w, int h, int r);
//variables
float gaussian[50];    //precomputed gaussian array
struct float4
{
float x;
float y;
float z;
float w;
float4() {};
float4(float value)
{
x = y = z = w = value;
}
};
void updateGaussianGold(float delta, int radius)
{
for (int i = 0; i < 2 * radius + 1; i++)
{
int x = i - radius;
gaussian[i] = exp(-(x * x) /
(2 * delta * delta));
}
}
float heuclideanLen(float4 a, float4 b, float d)
{
float mod = (b.x - a.x) * (b.x - a.x) +
(b.y - a.y) * (b.y - a.y) +
(b.z - a.z) * (b.z - a.z) +
(b.w - a.w) * (b.w - a.w);
return exp(-mod / (2.f * d * d));
}
unsigned int hrgbaFloatToInt(float4 rgba)
{
unsigned int w = (((unsigned int)(fabs(rgba.w) * 255.0f)) & 0xff) << 24;
unsigned int z = (((unsigned int)(fabs(rgba.z) * 255.0f)) & 0xff) << 16;
unsigned int y = (((unsigned int)(fabs(rgba.y) * 255.0f)) & 0xff) << 8;
unsigned int x = ((unsigned int)(fabs(rgba.x) * 255.0f)) & 0xff;
return (w | z | y | x);
}
float4 hrgbaIntToFloat(unsigned int c)
{
float4 rgba;
rgba.x = (c & 0xff) * 0.003921568627f; // /255.0f;
rgba.y = ((c >> 8) & 0xff) * 0.003921568627f; // /255.0f;
rgba.z = ((c >> 16) & 0xff) * 0.003921568627f; // /255.0f;
rgba.w = ((c >> 24) & 0xff) * 0.003921568627f; // /255.0f;
return rgba;
}
float4 mul(float a, float4 b)
{
float4 ans;
ans.x = a * b.x;
ans.y = a * b.y;
ans.z = a * b.z;
ans.w = a * b.w;
return ans;
}
float4 add4(float4 a, float4 b)
{
float4 ans;
ans.x = a.x + b.x;
ans.y = a.y + b.y;
ans.z = a.z + b.z;
ans.w = a.w + b.w;
return ans;
}
void bilateralFilterGold(unsigned int *pSrc,
unsigned int *pDest,
float e_d,
int w, int h, int r)
{
float4 *hImage = new float4[w * h];
float domainDist, colorDist, factor;
for (int y = 0; y < h; y++)
{
for (int x = 0; x < w; x++)
{
hImage[y * w + x] = hrgbaIntToFloat(pSrc[y * w + x]);
}
}
for (int y = 0; y < h; y++)
{
for (int x = 0; x < w; x++)
{
float4 t(0.0f);
float sum = 0.0f;
for (int i = -r; i <= r; i++)
{
int neighborY = y + i;
//clamp the neighbor pixel, prevent overflow
if (neighborY < 0)
{
neighborY = 0;
}
else if (neighborY >= h)
{
neighborY = h - 1;
}
for (int j = -r; j <= r; j++)
{
domainDist = gaussian[r + i] * gaussian[r + j];
//clamp the neighbor pixel, prevent overflow
int neighborX = x + j;
if (neighborX < 0)
{
neighborX = 0;
}
else if (neighborX >= w)
{
neighborX = w - 1;
}
colorDist = heuclideanLen(hImage[neighborY * w + neighborX], hImage[y * w + x], e_d);
factor = domainDist * colorDist;
sum += factor;
t = add4(t, mul(factor, hImage[neighborY * w + neighborX]));
}
}
pDest[y * w + x] = hrgbaFloatToInt(mul(1.0f / sum, t));
}
}
delete[] hImage;
}
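To make the calling convention concrete, here is a minimal usage sketch. The caller function, buffer names and parameter values are illustrative assumptions and not part of the original sample; only updateGaussianGold() and bilateralFilterGold() come from the code above.
// Hypothetical caller: 'src' and 'dst' are packed 32-bit RGBA buffers of width*height pixels.
void runBilateralGold(unsigned int* src, unsigned int* dst, int width, int height)
{
    const int radius = 5;       // keep 2 * radius + 1 <= 50, the size of gaussian[]
    const float delta = 4.0f;   // spatial sigma (illustrative value)
    const float e_d = 0.1f;     // range sigma; colors are normalized to [0,1] (illustrative value)
    updateGaussianGold(delta, radius);                          // fill the spatial lookup table first
    bilateralFilterGold(src, dst, e_d, width, height, radius);  // src is left untouched
}
Note that the cost grows with the square of the radius, which is why small radii are used in practice for this brute-force version.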
Many people online use the bilateral filter as a skin-smoothing algorithm, including the well-known GPUImage library.
They typically use a small-radius bilateral filter, but many fast variants of the bilateral filter exist.
For details, see: http://people.csail.mit.edu/sparis/bf/
A few months ago, someone on GitHub released a tiny implementation of a fast (recursive) bilateral filter:
https://github.com/ufoym/RecursiveBF/
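Before the listing, a quick note on what the code actually computes. Instead of a 2D window it runs 1D recursive passes along each axis. Reading off the left-to-right loop below, each scanline is filtered as
$$y_k = (1-\alpha)\,x_k + \alpha\,w_k\,y_{k-1}, \qquad f_k = (1-\alpha) + \alpha\,w_k\,f_{k-1},$$
where $\alpha = \exp(-\sqrt{2}/(\sigma_s \cdot \mathrm{width}))$ is the spatial decay, $w_k$ is a range weight looked up from the color difference between pixel $k$ and pixel $k-1$, and the normalized output is $y_k/f_k$. The same is done right-to-left, the two directions are averaged, and the whole procedure is then repeated vertically on the horizontal result. That is why the cost is linear in the number of pixels and independent of any filter radius.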
The code is in rbf.hpp:
#ifndef INCLUDE_RBF
#define INCLUDE_RBF
#include <math.h>
#include <string.h>
#define QX_DEF_CHAR_MAX 255
/* ======================================================================
RecursiveBF: A lightweight library for recursive bilateral filtering.
-------------------------------------------------------------------------
Intro: Recursive bilateral filtering (developed by Qingxiong Yang)
is pretty fast compared with most edge-preserving filtering
methods.
- computational complexity is linear in both input size and
dimensionality
- takes about 43 ms to process a one mega-pixel color image
(i7 1.8GHz & 4GB memory)
- about 18x faster than Fast high-dimensional filtering
using the permutohedral lattice
- about 86x faster than Gaussian kd-trees for fast high-
dimensional filtering
Usage: // ----------------------------------------------------------
// Basic Usage
// ----------------------------------------------------------
unsigned char * img = ...; // input image
unsigned char * img_out = 0; // output image
int width = ..., height = ..., channel = ...; // image size
recursive_bf(img, img_out,
sigma_spatial, sigma_range,
width, height, channel);
// ----------------------------------------------------------
// Advanced: using external buffer for better performance
// ----------------------------------------------------------
unsigned char * img = ...; // input image
unsigned char * img_out = 0; // output image
int width = ..., height = ..., channel = ...; // image size
float * buffer = new float[ // external buf
( width * height* channel
+ width * height
+ width * channel
+ width) * 2];
recursive_bf(img, img_out,
sigma_spatial, sigma_range,
width, height, channel,
buffer);
delete[] buffer;
Notice: Large sigma_spatial/sigma_range parameter may results in
visible artifact which can be removed by an additional
filter with small sigma_spatial/sigma_range parameter.
-------------------------------------------------------------------------
Reference: Qingxiong Yang, Recursive Bilateral Filtering,
European Conference on Computer Vision (ECCV) 2012, 399-413.
====================================================================== */
inline void recursive_bf(
unsigned char * img_in,
unsigned char *& img_out,
float sigma_spatial, float sigma_range,
int width, int height, int channel,
float * buffer /*= 0*/);
// ----------------------------------------------------------------------
inline void _recursive_bf(
unsigned char * img,
float sigma_spatial, float sigma_range,
int width, int height, int channel,
float * buffer = 0)
{
const int width_height = width * height;
const int width_channel = width * channel;
const int width_height_channel = width * height * channel;
bool is_buffer_internal = (buffer == 0);
if (is_buffer_internal)
buffer = new float[(width_height_channel + width_height
+ width_channel + width) * 2];
float * img_out_f = buffer;
float * img_temp = &img_out_f[width_height_channel];
float * map_factor_a = &img_temp[width_height_channel];
float * map_factor_b = &map_factor_a[width_height];
float * slice_factor_a = &map_factor_b[width_height];
float * slice_factor_b = &slice_factor_a[width_channel];
float * line_factor_a = &slice_factor_b[width_channel];
float * line_factor_b = &line_factor_a[width];
//compute a lookup table
float range_table[QX_DEF_CHAR_MAX + 1];
float inv_sigma_range = 1.0f / (sigma_range * QX_DEF_CHAR_MAX);
for (int i = 0; i <= QX_DEF_CHAR_MAX; i++)
range_table[i] = static_cast<float>(exp(-i * inv_sigma_range));
float alpha = static_cast<float>(exp(-sqrt(2.0) / (sigma_spatial * width)));
float ypr, ypg, ypb, ycr, ycg, ycb;
float fp, fc;
float inv_alpha_ = 1 - alpha;
for (int y = 0; y < height; y++)
{
float * temp_x = &img_temp[y * width_channel];
unsigned char * in_x = &img[y * width_channel];
unsigned char * texture_x = &img[y * width_channel];
*temp_x++ = ypr = *in_x++;
*temp_x++ = ypg = *in_x++;
*temp_x++ = ypb = *in_x++;
unsigned char tpr = *texture_x++;
unsigned char tpg = *texture_x++;
unsigned char tpb = *texture_x++;
float * temp_factor_x = &map_factor_a[y * width];
*temp_factor_x++ = fp = 1;
// from left to right
for (int x = 1; x < width; x++)
{
unsigned char tcr = *texture_x++;
unsigned char tcg = *texture_x++;
unsigned char tcb = *texture_x++;
unsigned char dr = abs(tcr - tpr);
unsigned char dg = abs(tcg - tpg);
unsigned char db = abs(tcb - tpb);
int range_dist = (((dr << 1) + dg + db) >> 2);
float weight = range_table[range_dist];
float alpha_ = weight*alpha;
*temp_x++ = ycr = inv_alpha_*(*in_x++) + alpha_*ypr;
*temp_x++ = ycg = inv_alpha_*(*in_x++) + alpha_*ypg;
*temp_x++ = ycb = inv_alpha_*(*in_x++) + alpha_*ypb;
tpr = tcr; tpg = tcg; tpb = tcb;
ypr = ycr; ypg = ycg; ypb = ycb;
*temp_factor_x++ = fc = inv_alpha_ + alpha_*fp;
fp = fc;
}
*--temp_x; *temp_x = 0.5f*((*temp_x) + (*--in_x));
*--temp_x; *temp_x = 0.5f*((*temp_x) + (*--in_x));
*--temp_x; *temp_x = 0.5f*((*temp_x) + (*--in_x));
tpr = *--texture_x;
tpg = *--texture_x;
tpb = *--texture_x;
ypr = *in_x; ypg = *in_x; ypb = *in_x;
*--temp_factor_x; *temp_factor_x = 0.5f*((*temp_factor_x) + 1);
fp = 1;
// from right to left
for (int x = width - 2; x >= 0; x--)
{
unsigned char tcr = *--texture_x;
unsigned char tcg = *--texture_x;
unsigned char tcb = *--texture_x;
unsigned char dr = abs(tcr - tpr);
unsigned char dg = abs(tcg - tpg);
unsigned char db = abs(tcb - tpb);
int range_dist = (((dr << 1) + dg + db) >> 2);
float weight = range_table[range_dist];
float alpha_ = weight * alpha;
ycr = inv_alpha_ * (*--in_x) + alpha_ * ypr;
ycg = inv_alpha_ * (*--in_x) + alpha_ * ypg;
ycb = inv_alpha_ * (*--in_x) + alpha_ * ypb;
*--temp_x; *temp_x = 0.5f*((*temp_x) + ycr);
*--temp_x; *temp_x = 0.5f*((*temp_x) + ycg);
*--temp_x; *temp_x = 0.5f*((*temp_x) + ycb);
tpr = tcr; tpg = tcg; tpb = tcb;
ypr = ycr; ypg = ycg; ypb = ycb;
fc = inv_alpha_ + alpha_*fp;
*--temp_factor_x;
*temp_factor_x = 0.5f*((*temp_factor_x) + fc);
fp = fc;
}
}
alpha = static_cast<float>(exp(-sqrt(2.0) / (sigma_spatial * height)));
inv_alpha_ = 1 - alpha;
float * ycy, * ypy, * xcy;
unsigned char * tcy, * tpy;
memcpy(img_out_f, img_temp, sizeof(float)* width_channel);
float * in_factor = map_factor_a;
float*ycf, *ypf, *xcf;
memcpy(map_factor_b, in_factor, sizeof(float) * width);
for (int y = 1; y < height; y++)
{
tpy = &img[(y - 1) * width_channel];
tcy = &img[y * width_channel];
xcy = &img_temp[y * width_channel];
ypy = &img_out_f[(y - 1) * width_channel];
ycy = &img_out_f[y * width_channel];
xcf = &in_factor[y * width];
ypf = &map_factor_b[(y - 1) * width];
ycf = &map_factor_b[y * width];
for (int x = 0; x < width; x++)
{
unsigned char dr = abs((*tcy++) - (*tpy++));
unsigned char dg = abs((*tcy++) - (*tpy++));
unsigned char db = abs((*tcy++) - (*tpy++));
int range_dist = (((dr << 1) + dg + db) >> 2);
float weight = range_table[range_dist];
float alpha_ = weight*alpha;
for (int c = 0; c < channel; c++)
*ycy++ = inv_alpha_*(*xcy++) + alpha_*(*ypy++);
*ycf++ = inv_alpha_*(*xcf++) + alpha_*(*ypf++);
}
}
int h1 = height - 1;
ycf = line_factor_a;
ypf = line_factor_b;
memcpy(ypf, &in_factor[h1 * width], sizeof(float) * width);
for (int x = 0; x < width; x++)
map_factor_b[h1 * width + x] = 0.5f*(map_factor_b[h1 * width + x] + ypf[x]);
ycy = slice_factor_a;
ypy = slice_factor_b;
memcpy(ypy, &img_temp[h1 * width_channel], sizeof(float)* width_channel);
int k = 0;
for (int x = 0; x < width; x++) {
for (int c = 0; c < channel; c++) {
int idx = (h1 * width + x) * channel + c;
img_out_f[idx] = 0.5f*(img_out_f[idx] + ypy[k++]) / map_factor_b[h1 * width + x];
}
}
for (int y = h1 - 1; y >= 0; y--)
{
tpy = &img[(y + 1) * width_channel];
tcy = &img[y * width_channel];
xcy = &img_temp[y * width_channel];
float*ycy_ = ycy;
float*ypy_ = ypy;
float*out_ = &img_out_f[y * width_channel];
xcf = &in_factor[y * width];
float*ycf_ = ycf;
float*ypf_ = ypf;
float*factor_ = &map_factor_b[y * width];
for (int x = 0; x < width; x++)
{
unsigned char dr = abs((*tcy++) - (*tpy++));
unsigned char dg = abs((*tcy++) - (*tpy++));
unsigned char db = abs((*tcy++) - (*tpy++));
int range_dist = (((dr << 1) + dg + db) >> 2);
float weight = range_table[range_dist];
float alpha_ = weight*alpha;
float fcc = inv_alpha_*(*xcf++) + alpha_*(*ypf_++);
*ycf_++ = fcc;
*factor_ = 0.5f * (*factor_ + fcc);
for (int c = 0; c < channel; c++)
{
float ycc = inv_alpha_*(*xcy++) + alpha_*(*ypy_++);
*ycy_++ = ycc;
*out_ = 0.5f * (*out_ + ycc) / (*factor_);
*out_++;
}
*factor_++;
}
memcpy(ypy, ycy, sizeof(float) * width_channel);
memcpy(ypf, ycf, sizeof(float) * width);
}
for (int i = 0; i < width_height_channel; ++i)
img[i] = static_cast<unsigned char>(img_out_f[i]);
if (is_buffer_internal)
delete[] buffer;
}
inline void recursive_bf(
unsigned char * img_in,
unsigned char *& img_out,
float sigma_spatial, float sigma_range,
int width, int height, int channel,
float * buffer = 0)
{
if (img_out == 0)
img_out = new unsigned char[width * height * channel];
for (int i = 0; i < width * height * channel; ++i)
img_out[i] = img_in[i];
_recursive_bf(img_out, sigma_spatial, sigma_range, width, height, channel, buffer);
}
#endif // INCLUDE_RBF
The author of that code was rather careless: there are several logic errors, and the coding style leaves something to be desired.
Of course, someone has taken the trouble to fix it up on that basis:
https://github.com/Fig1024/OP_RBF
That author also added SIMD optimizations on top of the base version.
I will skip the optimized code for now
and focus on the plain implementation, i.e. RBFilterPlain.cpp:
#include "stdafx.h"
#include "RBFilterPlain.h"
#include <algorithm>
using namespace std;
#define QX_DEF_CHAR_MAX 255
CRBFilterPlain::CRBFilterPlain()
{
}
CRBFilterPlain::~CRBFilterPlain()
{
releaseMemory();
}
// assumes 3/4 channel images, 1 byte per channel
void CRBFilterPlain::reserveMemory(int max_width, int max_height, int channels)
{
// basic sanity check
_ASSERT(max_width >= 10 && max_width < 10000);
_ASSERT(max_height >= 10 && max_height < 10000);
_ASSERT(channels >= 1 && channels <= 4);
releaseMemory();
m_reserve_width = max_width;
m_reserve_height = max_height;
m_reserve_channels = channels;
int width_height = m_reserve_width * m_reserve_height;
int width_height_channel = width_height * m_reserve_channels;
m_left_pass_color = new float[width_height_channel];
m_left_pass_factor = new float[width_height];
m_right_pass_color = new float[width_height_channel];
m_right_pass_factor = new float[width_height];
m_down_pass_color = new float[width_height_channel];
m_down_pass_factor = new float[width_height];
m_up_pass_color = new float[width_height_channel];
m_up_pass_factor = new float[width_height];
}
void CRBFilterPlain::releaseMemory()
{
m_reserve_width = 0;
m_reserve_height = 0;
m_reserve_channels = 0;
if (m_left_pass_color)
{
delete[] m_left_pass_color;
m_left_pass_color = nullptr;
}
if (m_left_pass_factor)
{
delete[] m_left_pass_factor;
m_left_pass_factor = nullptr;
}
if (m_right_pass_color)
{
delete[] m_right_pass_color;
m_right_pass_color = nullptr;
}
if (m_right_pass_factor)
{
delete[] m_right_pass_factor;
m_right_pass_factor = nullptr;
}
if (m_down_pass_color)
{
delete[] m_down_pass_color;
m_down_pass_color = nullptr;
}
if (m_down_pass_factor)
{
delete[] m_down_pass_factor;
m_down_pass_factor = nullptr;
}
if (m_up_pass_color)
{
delete[] m_up_pass_color;
m_up_pass_color = nullptr;
}
if (m_up_pass_factor)
{
delete[] m_up_pass_factor;
m_up_pass_factor = nullptr;
}
}
int CRBFilterPlain::getDiffFactor(const unsigned char* color1, const unsigned char* color2) const
{
int final_diff;
int component_diff[4];
// find absolute difference between each component
for (int i = 0; i < m_reserve_channels; i++)
{
component_diff[i] = abs(color1[i] - color2[i]);
}
// based on number of components, produce a single difference value in the 0-255 range
switch (m_reserve_channels)
{
case 1:
final_diff = component_diff[0];
break;
case 2:
final_diff = ((component_diff[0] + component_diff[1]) >> 1);
break;
case 3:
final_diff = ((component_diff[0] + component_diff[2]) >> 2) + (component_diff[1] >> 1);
break;
case 4:
final_diff = ((component_diff[0] + component_diff[1] + component_diff[2] + component_diff[3]) >> 2);
break;
default:
final_diff = 0;
}
_ASSERT(final_diff >= 0 && final_diff <= 255);
return final_diff;
}
// memory must be reserved before calling image filter
// this implementation of filter uses plain C++, single threaded
// channel count must be 3 or 4 (alpha not used)
void CRBFilterPlain::filter(unsigned char* img_src, unsigned char* img_dst,
float sigma_spatial, float sigma_range,
int width, int height, int channel)
{
_ASSERT(img_src);
_ASSERT(img_dst);
_ASSERT(m_reserve_channels == channel);
_ASSERT(m_reserve_width >= width);
_ASSERT(m_reserve_height >= height);
// compute a lookup table
float alpha_f = static_cast<float>(exp(-sqrt(2.0) / (sigma_spatial * 255)));
float inv_alpha_f = 1.f - alpha_f;
float range_table_f[QX_DEF_CHAR_MAX + 1];
float inv_sigma_range = 1.0f / (sigma_range * QX_DEF_CHAR_MAX);
{
float ii = 0.f;
for (int i = 0; i <= QX_DEF_CHAR_MAX; i++, ii -= 1.f)
{
range_table_f[i] = alpha_f * exp(ii * inv_sigma_range);
}
}
///////////////
// Left pass
{
const unsigned char* src_color = img_src;
float* left_pass_color = m_left_pass_color;
float* left_pass_factor = m_left_pass_factor;
for (int y = 0; y < height; y++)
{
const unsigned char* src_prev = src_color;
const float* prev_factor = left_pass_factor;
const float* prev_color = left_pass_color;
// process 1st pixel separately since it has no previous
*left_pass_factor++ = 1.f;
for (int c = 0; c < channel; c++)
{
*left_pass_color++ = *src_color++;
}
// handle other pixels
for (int x = 1; x < width; x++)
{
// determine difference in pixel color between current and previous
// calculation is different depending on number of channels
int diff = getDiffFactor(src_color, src_prev);
src_prev = src_color;
float alpha_f = range_table_f[diff];
*left_pass_factor++ = inv_alpha_f + alpha_f * (*prev_factor++);
for (int c = 0; c < channel; c++)
{
*left_pass_color++ = inv_alpha_f * (*src_color++) + alpha_f * (*prev_color++);
}
}
}
}
///////////////
// Right pass
{
// start from end and then go up to begining
int last_index = width * height * channel - 1;
const unsigned char* src_color = img_src + last_index;
float* right_pass_color = m_right_pass_color + last_index;
float* right_pass_factor = m_right_pass_factor + width * height - 1;
for (int y = 0; y < height; y++)
{
const unsigned char* src_prev = src_color;
const float* prev_factor = right_pass_factor;
const float* prev_color = right_pass_color;
// process 1st pixel separately since it has no previous
*right_pass_factor-- = 1.f;
for (int c = 0; c < channel; c++)
{
*right_pass_color-- = *src_color--;
}
// handle other pixels
for (int x = 1; x < width; x++)
{
// determine difference in pixel color between current and previous
// calculation is different depending on number of channels
int diff = getDiffFactor(src_color, src_color + channel);
// src_prev = src_color;
float alpha_f = range_table_f[diff];
*right_pass_factor-- = inv_alpha_f + alpha_f * (*prev_factor--);
for (int c = 0; c < channel; c++)
{
*right_pass_color-- = inv_alpha_f * (*src_color--) + alpha_f * (*prev_color--);
}
}
}
}
// vertical pass will be applied on top on horizontal pass, while using pixel differences from original image
// result color stored in 'm_left_pass_color' and vertical pass will use it as source color
{
float* img_out = m_left_pass_color; // use as temporary buffer
const float* left_pass_color = m_left_pass_color;
const float* left_pass_factor = m_left_pass_factor;
const float* right_pass_color = m_right_pass_color;
const float* right_pass_factor = m_right_pass_factor;
int width_height = width * height;
for (int i = 0; i < width_height; i++)
{
// average color divided by average factor
float factor = 1.f / ((*left_pass_factor++) + (*right_pass_factor++));
for (int c = 0; c < channel; c++)
{
*img_out++ = (factor * ((*left_pass_color++) + (*right_pass_color++)));
}
}
}
///////////////
// Down pass
{
const float* src_color_hor = m_left_pass_color; // result of horizontal pass filter
const unsigned char* src_color = img_src;
float* down_pass_color = m_down_pass_color;
float* down_pass_factor = m_down_pass_factor;
const unsigned char* src_prev = src_color;
const float* prev_color = down_pass_color;
const float* prev_factor = down_pass_factor;
// 1st line done separately because no previous line
for (int x = 0; x < width; x++)
{
*down_pass_factor++ = 1.f;
for (int c = 0; c < channel; c++)
{
*down_pass_color++ = *src_color_hor++;
}
src_color += channel;
}
// handle other lines
for (int y = 1; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// determine difference in pixel color between current and previous
// calculation is different depending on number of channels
int diff = getDiffFactor(src_color, src_prev);
src_prev += channel;
src_color += channel;
float alpha_f = range_table_f[diff];
*down_pass_factor++ = inv_alpha_f + alpha_f * (*prev_factor++);
for (int c = 0; c < channel; c++)
{
*down_pass_color++ = inv_alpha_f * (*src_color_hor++) + alpha_f * (*prev_color++);
}
}
}
}
///////////////
// Up pass
{
// start from end and then go up to begining
int last_index = width * height * channel - 1;
const unsigned char* src_color = img_src + last_index;
const float* src_color_hor = m_left_pass_color + last_index; // result of horizontal pass filter
float* up_pass_color = m_up_pass_color + last_index;
float* up_pass_factor = m_up_pass_factor + (width * height - 1);
// const unsigned char* src_prev = src_color;
const float* prev_color = up_pass_color;
const float* prev_factor = up_pass_factor;
// 1st line done separately because no previous line
for (int x = 0; x < width; x++)
{
*up_pass_factor-- = 1.f;
for (int c = 0; c < channel; c++)
{
*up_pass_color-- = *src_color_hor--;
}
src_color -= channel;
}
// handle other lines
for (int y = 1; y < height; y++)
{
for (int x = 0; x < width; x++)
{
// determine difference in pixel color between current and previous
// calculation is different depending on number of channels
src_color -= channel;
int diff = getDiffFactor(src_color, src_color + width * channel);
float alpha_f = range_table_f[diff];
*up_pass_factor-- = inv_alpha_f + alpha_f * (*prev_factor--);
for (int c = 0; c < channel; c++)
{
*up_pass_color-- = inv_alpha_f * (*src_color_hor--) + alpha_f * (*prev_color--);
}
}
}
}
///////////////
// average result of vertical pass is written to output buffer
{
const float* down_pass_color = m_down_pass_color;
const float* down_pass_factor = m_down_pass_factor;
const float* up_pass_color = m_up_pass_color;
const float* up_pass_factor = m_up_pass_factor;
int width_height = width * height;
for (int i = 0; i < width_height; i++)
{
// average color divided by average factor
float factor = 1.f / ((*up_pass_factor++) + (*down_pass_factor++));
for (int c = 0; c < channel; c++)
{
*img_dst++ = (unsigned char)(factor * ((*up_pass_color++) + (*down_pass_color++)));
}
}
}
}
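For orientation, here is a minimal sketch of how this class is driven. The caller function, buffer names and sigma values are illustrative assumptions; only the class API (reserveMemory / filter and the cleanup in the destructor) comes from the code above.
// Hypothetical caller: 'src' and 'dst' are interleaved RGB buffers of size width*height*3.
void runPlainRBF(unsigned char* src, unsigned char* dst, int width, int height)
{
    CRBFilterPlain rbf;
    rbf.reserveMemory(width, height, 3);                    // must be called before filter()
    rbf.filter(src, dst, 0.03f, 0.05f, width, height, 3);   // sigma_spatial, sigma_range (illustrative)
    // internal buffers are released by the destructor via releaseMemory()
}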
This code lays out the algorithm's logic very clearly.
At first I also thought both its quality and its speed were quite good.
But I had not looked carefully; a friend pointed out that the algorithm has a serious bug that produces line artifacts.
After increasing the relevant parameter values, it is indeed obvious: very visible banding that simply ruins the image.
After some time thinking about it, I pinned down where the algorithm goes wrong,
and I now have a clear idea of how to fix it.
For now, here is my lightly cleaned-up, complete C version of OP_RBF,
with some logic optimizations to reduce memory usage.
#include <math.h>
#include <stdlib.h>
#include <crtdbg.h>
int getDiffFactor(const unsigned char* color1, const unsigned char* color2, const int & channels)
{
int final_diff;
int component_diff[4];
// find absolute difference between each component
for (int i = 0; i < channels; i++)
{
component_diff[i] = abs(color1[i] - color2[i]);
}
// based on number of components, produce a single difference value in the 0-255 range
switch (channels)
{
case 1:
final_diff = component_diff[0];
break;
case 2:
final_diff = ((component_diff[0] + component_diff[1]) >> 1);
break;
case 3:
final_diff = ((component_diff[0] + component_diff[2]) >> 2) + (component_diff[1] >> 1);
break;
case 4:
final_diff = ((component_diff[0] + component_diff[1] + component_diff[2] + component_diff[3]) >> 2);
break;
default:
final_diff = 0;
}
_ASSERT(final_diff >= 0 && final_diff <= 255);
return final_diff;
}
void CRB_HorizontalFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Channels, float * range_table_f, float inv_alpha_f, float* left_Color_Buffer, float* left_Factor_Buffer, float* right_Color_Buffer, float* right_Factor_Buffer)
{
// Left pass and Right pass
int Stride = Width * Channels;
const unsigned char* src_left_color = Input;
float* left_Color = left_Color_Buffer;
float* left_Factor = left_Factor_Buffer;
int last_index = Stride * Height - 1;
const unsigned char* src_right_color = Input + last_index;
float* right_Color = right_Color_Buffer + last_index;
float* right_Factor = right_Factor_Buffer + Width * Height - 1;
for (int y = 0; y < Height; y++)
{
const unsigned char* src_left_prev = src_left_color;
const float* left_prev_factor = left_Factor;
const float* left_prev_color = left_Color;
const unsigned char* src_right_prev = src_right_color;
const float* right_prev_factor = right_Factor;
const float* right_prev_color = right_Color;
// process 1st pixel separately since it has no previous
{
//if x = 0
*left_Factor++ = 1.f;
*right_Factor-- = 1.f;
for (int c = 0; c < Channels; c++)
{
*left_Color++ = *src_left_color++;
*right_Color-- = *src_right_color--;
}
}
// handle other pixels
for (int x = 1; x < Width; x++)
{
// determine difference in pixel color between current and previous
// calculation is different depending on number of channels
int left_diff = getDiffFactor(src_left_color, src_left_prev, Channels);
src_left_prev = src_left_color;
int right_diff = getDiffFactor(src_right_color, src_right_color - Channels, Channels);
src_right_prev = src_right_color;
float left_alpha_f = range_table_f[left_diff];
float right_alpha_f = range_table_f[right_diff];
*left_Factor++ = inv_alpha_f + left_alpha_f * (*left_prev_factor++);
*right_Factor-- = inv_alpha_f + right_alpha_f * (*right_prev_factor--);
for (int c = 0; c < Channels; c++)
{
*left_Color++ = (inv_alpha_f * (*src_left_color++) + left_alpha_f * (*left_prev_color++));
*right_Color-- = (inv_alpha_f * (*src_right_color--) + right_alpha_f * (*right_prev_color--));
}
}
}
// vertical pass will be applied on top on horizontal pass, while using pixel differences from original image
// result color stored in 'leftcolor' and vertical pass will use it as source color
{
unsigned char* dst_color = Output; // use as temporary buffer
const float* leftcolor = left_Color_Buffer;
const float* leftfactor = left_Factor_Buffer;
const float* rightcolor = right_Color_Buffer;
const float* rightfactor = right_Factor_Buffer;
int width_height = Width * Height;
for (int i = 0; i < width_height; i++)
{
// average color divided by average factor
float factor = 1.f / ((*leftfactor++) + (*rightfactor++));
for (int c = 0; c < Channels; c++)
{
*dst_color++ = (factor * ((*leftcolor++) + (*rightcolor++)));
}
}
}
}
void CRB_VerticalFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Channels, float * range_table_f, float inv_alpha_f, float* down_Color_Buffer, float* down_Factor_Buffer, float* up_Color_Buffer, float* up_Factor_Buffer)
{
// Down pass and Up pass
int Stride = Width * Channels;
const unsigned char* src_color_first_hor = Output; // result of horizontal pass filter
const unsigned char* src_down_color = Input;
float* down_color = down_Color_Buffer;
float* down_factor = down_Factor_Buffer;
const unsigned char* src_down_prev = src_down_color;
const float* down_prev_color = down_color;
const float* down_prev_factor = down_factor;
int last_index = Stride * Height - 1;
const unsigned char* src_up_color = Input + last_index;
const unsigned char* src_color_last_hor = Output + last_index; // result of horizontal pass filter
float* up_color = up_Color_Buffer + last_index;
float* up_factor = up_Factor_Buffer + (Width * Height - 1);
const float* up_prev_color = up_color;
const float* up_prev_factor = up_factor;
// 1st line done separately because no previous line
{
//if y=0
for (int x = 0; x < Width; x++)
{
*down_factor++ = 1.f;
*up_factor-- = 1.f;
for (int c = 0; c < Channels; c++)
{
*down_color++ = *src_color_first_hor++;
*up_color-- = *src_color_last_hor--;
}
src_down_color += Channels;
src_up_color -= Channels;
}
}
// handle other lines
for (int y = 1; y < Height; y++)
{
for (int x = 0; x < Width; x++)
{
// determine difference in pixel color between current and previous
// calculation is different depending on number of channels
int down_diff = getDiffFactor(src_down_color, src_down_prev, Channels);
src_down_prev += Channels;
src_down_color += Channels;
src_up_color -= Channels;
int up_diff = getDiffFactor(src_up_color, src_up_color + Stride, Channels);
float down_alpha_f = range_table_f[down_diff];
float up_alpha_f = range_table_f[up_diff];
*down_factor++ = inv_alpha_f + down_alpha_f * (*down_prev_factor++);
*up_factor-- = inv_alpha_f + up_alpha_f * (*up_prev_factor--);
for (int c = 0; c < Channels; c++)
{
*down_color++ = inv_alpha_f * (*src_color_first_hor++) + down_alpha_f * (*down_prev_color++);
*up_color-- = inv_alpha_f * (*src_color_last_hor--) + up_alpha_f * (*up_prev_color--);
}
}
}
// average result of vertical pass is written to output buffer
{
unsigned char *dst_color = Output;
const float* downcolor = down_Color_Buffer;
const float* downfactor = down_Factor_Buffer;
const float* upcolor = up_Color_Buffer;
const float* upfactor = up_Factor_Buffer;
int width_height = Width * Height;
for (int i = 0; i < width_height; i++)
{
// average color divided by average factor
float factor = 1.f / ((*upfactor++) + (*downfactor++));
for (int c = 0; c < Channels; c++)
{
*dst_color++ = (factor * ((*upcolor++) + (*downcolor++)));
}
}
}
}
// memory must be reserved before calling image filter
// this implementation of filter uses plain C++, single threaded
// channel count must be 3 or 4 (alpha not used)
void CRBFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Stride, float sigmaSpatial, float sigmaRange)
{
int Channels = Stride / Width;
int reserveWidth = Width;
int reserveHeight = Height;
// basic sanity check
_ASSERT(Input);
_ASSERT(Output);
_ASSERT(reserveWidth >= 10 && reserveWidth < 10000);
_ASSERT(reserveHeight >= 10 && reserveHeight < 10000);
_ASSERT(Channels >= 1 && Channels <= 4);
int reservePixels = reserveWidth * reserveHeight;
int numberOfPixels = reservePixels * Channels;
float* leftColorBuffer = (float*)malloc(sizeof(float) * numberOfPixels);
float* leftFactorBuffer = (float*)malloc(sizeof(float) * reservePixels);
float* rightColorBuffer = (float*)malloc(sizeof(float) * numberOfPixels);
float* rightFactorBuffer = (float*)malloc(sizeof(float) * reservePixels);
if (leftColorBuffer == NULL || leftFactorBuffer == NULL || rightColorBuffer == NULL || rightFactorBuffer == NULL)
{
if (leftColorBuffer) free(leftColorBuffer);
if (leftFactorBuffer) free(leftFactorBuffer);
if (rightColorBuffer) free(rightColorBuffer);
if (rightFactorBuffer) free(rightFactorBuffer);
return;
}
float* downColorBuffer = leftColorBuffer;
float* downFactorBuffer = leftFactorBuffer;
float* upColorBuffer = rightColorBuffer;
float* upFactorBuffer = rightFactorBuffer;
// compute a lookup table
float alpha_f = (float)(exp(-sqrt(2.0) / (sigmaSpatial * 255)));
float inv_alpha_f = 1.f - alpha_f;
float range_table_f[255 + 1];
float inv_sigma_range = 1.0f / (sigmaRange * 255);
float ii = 0.f;
for (int i = 0; i <= 255; i++, ii -= 1.f)
{
range_table_f[i] = alpha_f * exp(ii * inv_sigma_range);
}
CRB_HorizontalFilter(Input, Output, Width, Height, Channels, range_table_f, inv_alpha_f, leftColorBuffer, leftFactorBuffer, rightColorBuffer, rightFactorBuffer);
CRB_VerticalFilter(Input, Output, Width, Height, Channels, range_table_f, inv_alpha_f, downColorBuffer, downFactorBuffer, upColorBuffer, upFactorBuffer);
if (leftColorBuffer)
{
free(leftColorBuffer);
leftColorBuffer = NULL;
}
if (leftFactorBuffer)
{
free(leftFactorBuffer);
leftFactorBuffer = NULL;
}
if (rightColorBuffer)
{
free(rightColorBuffer);
rightColorBuffer = NULL;
}
if (rightFactorBuffer)
{
free(rightFactorBuffer);
rightFactorBuffer = NULL;
}
}
void CRBFilter(unsigned char* Input, unsigned char* Output, int Width, int Height, int Stride, float sigmaSpatial, float sigmaRange);
That is the whole interface: a single, simple function.
The code is fairly clear, so I won't explain it further.
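A minimal call site might look like the sketch below; the caller function, image loading and sigma values are assumptions for illustration, only CRBFilter() itself comes from the code above.
// Hypothetical caller: 'src' and 'dst' are interleaved RGB images of size width*height*3.
void runCRBFilter(unsigned char* src, unsigned char* dst, int width, int height)
{
    int channels = 3;
    int stride = width * channels;   // CRBFilter recovers the channel count as Stride / Width
    CRBFilter(src, dst, width, height, stride, 0.03f, 0.05f);
}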
Here is a before/after demo:


Comparing before and after, look at the bags under the subject's eyes: the banding artifact is clearly visible.
I haven't posted in a while. Recently quite a few readers have asked about algorithm details of "Receipt OCR Preprocessing (with Demo)".
To be honest, that code was written quite some time ago and I no longer remember the logic clearly.
When I have time, I will tidy up the code and write another post sharing the complete logic and code.
My contact information:
Email: gaozhihan@vip.qq.com
Please state your purpose when contacting me; otherwise the message will be ignored. Thanks.
If this post helped you, a small donation via the QR codes below is welcome.
WeChat:

Alipay:
