OpenGL.Shader: Zhige Teaches You to Write a Filter Live-Streaming Client (5): Visual Filters: Contrast, Exposure, Mosaic


The previous chapter covered how to switch filters seamlessly while rendering an NV21 stream. This chapter follows directly from it and introduces three filter effects: contrast, exposure and mosaic, and also shows how to adjust a filter's strength dynamically. Without further ado, show the code!

Filter 1: Contrast

#include "GpuBaseFilter.hpp"
/**
 * Adjusts the contrast of the image.
 * The contrast value ranges from 0.0 to 4.0, with 1.0 as the normal level.
 */
class GpuContrastFilter : public GpuBaseFilter {
public:
    int getTypeId() { return FILTER_TYPE_CONTRAST; }

    GpuContrastFilter()
    {
        CONTRAST_FRAGMENT_SHADER  ="precision mediump float;\n\
                                    varying highp vec2 textureCoordinate;\n\
                                    uniform sampler2D SamplerRGB;\n\
                                    uniform sampler2D SamplerY;\n\
                                    uniform sampler2D SamplerU;\n\
                                    uniform sampler2D SamplerV;\n\
                                    uniform lowp float contrast;\n\
                                    mat3 colorConversionMatrix = mat3(\n\
                                                       1.0, 1.0, 1.0,\n\
                                                       0.0, -0.39465, 2.03211,\n\
                                                       1.13983, -0.58060, 0.0);\n\
                                    vec3 yuv2rgb(vec2 pos)\n\
                                    {\n\
                                       vec3 yuv;\n\
                                       yuv.x = texture2D(SamplerY, pos).r;\n\
                                       yuv.y = texture2D(SamplerU, pos).r - 0.5;\n\
                                       yuv.z = texture2D(SamplerV, pos).r - 0.5;\n\
                                       return colorConversionMatrix * yuv;\n\
                                    }\n\
                                    void main()\n\
                                    {\n\
                                       vec4 textureColor = vec4(yuv2rgb(textureCoordinate), 1.0);\n\
                                       gl_FragColor = vec4((contrast*(textureColor.rgb - vec3(0.5)) + vec3(0.5)), textureColor.w);\n\
                                    }";
    }
    ~GpuContrastFilter() {
        if(!CONTRAST_FRAGMENT_SHADER.empty()) CONTRAST_FRAGMENT_SHADER.clear();
    }
    void init() {
        GpuBaseFilter::init(NO_FILTER_VERTEX_SHADER.c_str(), CONTRAST_FRAGMENT_SHADER.c_str());
        mContrastLocation = glGetUniformLocation(mGLProgId, "contrast");
        mContrastValue = 1.0f;
    }

    void setAdjustEffect(float percent) {
        mContrastValue = percent * 4.0f; // map the 0~1 percent onto the empirical 0~4 contrast range
    }

    void onDraw(GLuint SamplerY_texId, GLuint SamplerU_texId, GLuint SamplerV_texId,
                        void* positionCords, void* textureCords)
    { /* ... */ }

private:
    std::string CONTRAST_FRAGMENT_SHADER;

    GLint   mContrastLocation;
    float   mContrastValue;
};
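The onDraw body above is elided. For orientation, here is a minimal sketch of what a draw call usually looks like in this GPUImage-style setup: activate the program, upload the filter-specific contrast uniform, feed the vertex and texture coordinates, bind the Y/U/V planes to three texture units and draw a full-screen quad. The location members used for the attributes and samplers (mGLAttribPosition, mGLAttribTexCoord, mSamplerYLocation and friends) are my own placeholder names, not necessarily the actual fields of the project's GpuBaseFilter:

// Hypothetical sketch of GpuContrastFilter::onDraw; the attribute/sampler location
// member names are assumptions, only the uniform "contrast" is confirmed by the shader.
void onDraw(GLuint SamplerY_texId, GLuint SamplerU_texId, GLuint SamplerV_texId,
            void* positionCords, void* textureCords)
{
    if (!mIsInitialized)
        return;
    glUseProgram(mGLProgId);
    // filter-specific uniform: the contrast factor set via setAdjustEffect()
    glUniform1f(mContrastLocation, mContrastValue);

    // vertex positions and texture coordinates (assuming 2 floats per vertex for each)
    glVertexAttribPointer(mGLAttribPosition, 2, GL_FLOAT, GL_FALSE, 0, positionCords);
    glEnableVertexAttribArray(mGLAttribPosition);
    glVertexAttribPointer(mGLAttribTexCoord, 2, GL_FLOAT, GL_FALSE, 0, textureCords);
    glEnableVertexAttribArray(mGLAttribTexCoord);

    // bind the three planes of the frame to texture units 0/1/2
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, SamplerY_texId);
    glUniform1i(mSamplerYLocation, 0);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, SamplerU_texId);
    glUniform1i(mSamplerULocation, 1);
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, SamplerV_texId);
    glUniform1i(mSamplerVLocation, 2);

    // full-screen quad
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

    glDisableVertexAttribArray(mGLAttribPosition);
    glDisableVertexAttribArray(mGLAttribTexCoord);
}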

The vertex shader reuses the base class's GpuBaseFilter.NO_FILTER_VERTEX_SHADER. Looking at the fragment shader, the contrast principle is easy to spot:

gl_FragColor = vec4((contrast*(textureColor.rgb - vec3(0.5)) + vec3(0.5)), textureColor.w);

textureColor.rgb - vec3(0.5) re-centers each channel around the 0.5 midpoint (mid-gray); contrast is the contrast factor. Multiplying the centered value by that factor stretches or compresses every channel's distance from mid-gray, which widens or narrows the separation between tone levels. Empirically the factor is kept within the 0~4 range.
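A quick numeric check of that formula, as a standalone sketch rather than project code: contrast = 1 leaves values untouched, contrast = 2 doubles every channel's distance from mid-gray, and contrast = 0 collapses the image to flat gray.

#include <cstdio>

// standalone check of: gl_FragColor.rgb = contrast*(rgb - 0.5) + 0.5, per channel
static float applyContrast(float c, float contrast) {
    return contrast * (c - 0.5f) + 0.5f;
}

int main() {
    const float contrasts[] = {0.0f, 1.0f, 2.0f};
    const float samples[]   = {0.3f, 0.5f, 0.6f, 0.9f};
    for (float k : contrasts)
        for (float c : samples)
            std::printf("contrast=%.1f  %.2f -> %.2f\n", k, c, applyContrast(c, k));
    // note: results outside 0..1 (e.g. 2.0*(0.9-0.5)+0.5 = 1.3) are clamped by the GPU
    // when written to the framebuffer
    return 0;
}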

Obviously contrast has to be adjustable at run time. Recalling the design from the previous chapter and tracing setAdjustEffect(float percent) inside GpuFilterRender, you will find that it, too, is called from renderOnDraw. Part of the code:

void GpuFilterRender::renderOnDraw(double elpasedInMilliSec)
{
        // render the frame
        mWindowSurface->makeCurrent();
        yTextureId = updateTexture(dst_y, yTextureId, mFrameWidth, mFrameHeight);
        uTextureId = updateTexture(dst_u, uTextureId, mFrameWidth/2, mFrameHeight/2);
        vTextureId = updateTexture(dst_v, vTextureId, mFrameWidth/2, mFrameHeight/2);
        // check whether the current filter needs to be switched
        checkFilterChange();
        if( mFilter!=NULL) {
            mFilter->setAdjustEffect(mFilterEffectPercent);
            mFilter->onDraw(yTextureId, uTextureId, vTextureId, positionCords, textureCords);
        }
        // ...    
}
void GpuFilterRender::adjustFilterValue(int value, int max) {
    mFilterEffectPercent = (float)value / (float)max;
    //LOGD("GpuFilterRender adjust %f", mFilterEffectPercent);
}
///gpu_filter_jni//
JNIEXPORT void JNICALL
Java_org_zzrblog_gpufilter_GpuFilterRender_adjustFilterValue(JNIEnv *env, jobject instance, jint value, jint max) {
    if (render == NULL)
        render = new GpuFilterRender();
    render->adjustFilterValue(value, max);
}

Tracing further up the call chain: the Activity hosts a SeekBar whose progress is forwarded to CfeScheduler.adjustFilterValue(value, max), which reaches the JNI entry above and finally adjusts the contrast factor contrast at run time.
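Note that renderOnDraw calls mFilter->setAdjustEffect(...) through the base-class pointer for every filter, including ones with nothing to adjust. A plausible way to support that (my assumption of how GpuBaseFilter is declared, not code copied from the repo) is a virtual member with a no-op default:

// Sketch only: assumes GpuBaseFilter exposes setAdjustEffect as a virtual no-op,
// so GpuFilterRender can call it on any filter without knowing its concrete type.
class GpuBaseFilter {
public:
    virtual ~GpuBaseFilter() {}
    // percent is the SeekBar position normalized to 0.0 ~ 1.0
    virtual void setAdjustEffect(float percent) { /* default: nothing to adjust */ }
    // ... the rest of the base class (init, onDraw, onOutputSizeChanged, ...) omitted
};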

Filter 2: Exposure (Color Inversion)

#include "GpuBaseFilter.hpp"
/**
 * Inverts every color in the image.
 */
class GpuColorInvertFilter : public GpuBaseFilter {
public:
    int getTypeId() { return FILTER_TYPE_COLOR_INVERT; }

    GpuColorInvertFilter()
    {
        COLOR_INVERT_FRAGMENT_SHADER="precision mediump float;\n\
                                    varying highp vec2 textureCoordinate;\n\
                                    uniform sampler2D SamplerRGB;\n\
                                    uniform sampler2D SamplerY;\n\
                                    uniform sampler2D SamplerU;\n\
                                    uniform sampler2D SamplerV;\n\
                                    mat3 colorConversionMatrix = mat3(\n\
                                                       1.0, 1.0, 1.0,\n\
                                                       0.0, -0.39465, 2.03211,\n\
                                                       1.13983, -0.58060, 0.0);\n\
                                    vec3 yuv2rgb(vec2 pos)\n\
                                    {\n\
                                       vec3 yuv;\n\
                                       yuv.x = texture2D(SamplerY, pos).r;\n\
                                       yuv.y = texture2D(SamplerU, pos).r - 0.5;\n\
                                       yuv.z = texture2D(SamplerV, pos).r - 0.5;\n\
                                       return colorConversionMatrix * yuv;\n\
                                    }\n\
                                    void main()\n\
                                    {\n\
                                       vec4 textureColor = vec4(yuv2rgb(textureCoordinate), 1.0);\n\
                                       gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w);\n\
                                    }";
    }
    ~GpuColorInvertFilter() {
        if(!COLOR_INVERT_FRAGMENT_SHADER.empty()) COLOR_INVERT_FRAGMENT_SHADER.clear();
    }

    void init() {
        GpuBaseFilter::init(NO_FILTER_VERTEX_SHADER.c_str(), COLOR_INVERT_FRAGMENT_SHADER.c_str());
    }

private:
    std::string COLOR_INVERT_FRAGMENT_SHADER;
};

The vertex shader again reuses the base class's GpuBaseFilter.NO_FILTER_VERTEX_SHADER. Looking at the fragment shader, the "exposure" principle is obvious:

gl_FragColor = vec4((1.0 - textureColor.rgb), textureColor.w); 

It is simply a per-channel color inversion, with the alpha channel left untouched, and it needs no dynamic adjustment, so easy (个_个)
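A tiny standalone check, not project code, showing the inversion on a sample color and that applying it twice gives the original back (the mapping is its own inverse):

#include <cstdio>

// per-channel inversion, exactly as in the fragment shader: c -> 1.0 - c
static float invert(float c) { return 1.0f - c; }

int main() {
    const float rgb[] = {0.2f, 0.5f, 0.9f};
    for (float c : rgb) {
        float once  = invert(c);     // e.g. 0.2 -> 0.8
        float twice = invert(once);  // inverting again restores the original value
        std::printf("%.1f -> %.1f -> %.1f\n", c, once, twice);
    }
    return 0;
}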


Filter 3: Mosaic

The last one is also the most interesting filter effect, and probably the one seasoned viewers dislike the most (smirk.jpg).

#include "GpuBaseFilter.hpp"
/**
 * Applies a mosaic (pixelation) effect to the image.
 */
class GpuPixelationFilter : public GpuBaseFilter {
public:
    int getTypeId() { return FILTER_TYPE_PIXELATION; }

    GpuPixelationFilter()
    {
        PIXELATION_FRAGMENT_SHADER="precision highp float;\n\
                                    varying highp vec2 textureCoordinate;\n\
                                    uniform sampler2D SamplerRGB;\n\
                                    uniform sampler2D SamplerY;\n\
                                    uniform sampler2D SamplerU;\n\
                                    uniform sampler2D SamplerV;\n\
                                    mat3 colorConversionMatrix = mat3(\n\
                                                       1.0, 1.0, 1.0,\n\
                                                       0.0, -0.39465, 2.03211,\n\
                                                       1.13983, -0.58060, 0.0);\n\
                                    uniform float imageWidthFactor;\n\
                                    uniform float imageHeightFactor;\n\
                                    uniform float pixel;\n\
                                    vec3 yuv2rgb(vec2 pos)\n\
                                    {\n\
                                        vec3 yuv;\n\
                                        yuv.x = texture2D(SamplerY, pos).r;\n\
                                        yuv.y = texture2D(SamplerU, pos).r-0.5;\n\
                                        yuv.z = texture2D(SamplerV, pos).r-0.5;\n\
                                        return colorConversionMatrix * yuv;\n\
                                    }\n\
                                    void main()\n\
                                    {\n\
                                        vec2 uv  = textureCoordinate.xy;\n\
                                        float dx = pixel * imageWidthFactor;\n\
                                        float dy = pixel * imageHeightFactor;\n\
                                        vec2 coord = vec2(dx*floor(uv.x / dx), dy*floor(uv.y / dy));\n\
                                        gl_FragColor = vec4(yuv2rgb(coord), 1.0);\n\
                                    }";
    }
    ~GpuPixelationFilter() {
        if(!PIXELATION_FRAGMENT_SHADER.empty()) PIXELATION_FRAGMENT_SHADER.clear();
    }

    void init() {
        GpuBaseFilter::init(NO_FILTER_VERTEX_SHADER.c_str(), PIXELATION_FRAGMENT_SHADER.c_str());
        mPixelLocation = glGetUniformLocation(mGLProgId, "pixel");
        mImageWidthFactorLocation = glGetUniformLocation(mGLProgId, "imageWidthFactor");
        mImageHeightFactorLocation = glGetUniformLocation(mGLProgId, "imageHeightFactor");
        mPixelValue = 1.0f;
    }
    void setAdjustEffect(float percent) {
        if(percent==0.0f) percent=0.01f;
        mPixelValue = percent * 100.0f;
    }
    void onOutputSizeChanged(int width, int height) {
        GpuBaseFilter::onOutputSizeChanged(width, height);
        glUseProgram(mGLProgId); // glUniform* writes to the currently active program
        glUniform1f(mImageWidthFactorLocation, 1.0f / width);
        glUniform1f(mImageHeightFactorLocation, 1.0f / height);
    }
    // ...
    void onDraw(GLuint SamplerY_texId, GLuint SamplerU_texId, GLuint SamplerV_texId,
                void* positionCords, void* textureCords)
    {
        if (!mIsInitialized)
            return;
        glUseProgram(mGLProgId);

        glUniform1f(mPixelLocation, mPixelValue);
        glUniform1f(mImageWidthFactorLocation, 1.0f / mOutputWidth);
        glUniform1f(mImageHeightFactorLocation, 1.0f / mOutputHeight);
        // standard drawing boilerplate, omitted here
    }
};

There is a bit more going on here, so let's walk through it:

uniform float imageWidthFactor;  // reciprocal of the output width (1.0/width): one texel step in normalized texture coordinates
uniform float imageHeightFactor; // likewise, 1.0/height
uniform float pixel;             // mosaic block size, measured in texels

void main()
{
    vec2 uv  = textureCoordinate.xy; // current texture coordinate, normalized to 0..1
    float dx = pixel * imageWidthFactor;  // block width in normalized coordinates
    float dy = pixel * imageHeightFactor; // block height in normalized coordinates
    // floor(uv.x / dx) rounds down to the index of the block that uv.x falls into.
    // Concretely, for a 720x1280 frame imageWidthFactor = 1/720; with pixel = 72, dx = 0.1:
    //   uv.x in [0.0, 0.1)  ->  0.1 * floor(uv.x / 0.1) = 0.0
    //   uv.x in [0.1, 0.2)  ->  0.1
    //   uv.x in [0.9, 1.0)  ->  0.9
    // So every texture coordinate inside a block is snapped to the block's starting
    // coordinate, and the whole block samples the same texel: that is the mosaic.
    vec2 coord = vec2(dx*floor(uv.x / dx), dy*floor(uv.y / dy));
    gl_FragColor = vec4(yuv2rgb(coord), 1.0);
}

That completes the analysis of the mosaic filter.


Project address: https://github.com/MrZhaozhirong/NativeCppApp. The shaders are all collected under src\main\cpp\gpufilter\filter.


Reposted from blog.csdn.net/a360940265a/article/details/104636475