文章参考
一 打开输入流
创建一个native方法,从java层传入视频地址,例如下面代码
jint native_play(JNIEnv *env, jobject instance, jstring inputPath_)
然后使用下面代码打开输入流
//打开输入流
AVFormatContext *avFormatContext = avformat_alloc_context();
const char *inputPath = env->GetStringUTFChars(inputPath_, nullptr);
avformat_open_input(&avFormatContext, inputPath, nullptr, nullptr);
avformat_find_stream_info(avFormatContext, nullptr);
二 找到视频流
通过 avFormatContext 找到视频流
// Locate the first video stream in the container.
int video_index = -1;
// nb_streams is unsigned — use an unsigned index to avoid a sign-compare warning.
for (unsigned int i = 0; i < avFormatContext->nb_streams; ++i) {
    if (avFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        video_index = static_cast<int>(i);
        break; // take the FIRST video stream; without break the LAST one won
    }
}
// NOTE(review): if the file has no video stream this stays -1 and the code
// below would index streams[-1] — the caller should bail out when it is -1.
三 打开解码器
// Find and open a decoder matching the stream's codec id.
AVCodec *avCodec = avcodec_find_decoder(
        avFormatContext->streams[video_index]->codecpar->codec_id);
if (avCodec == nullptr) {
    return -1; // no decoder for this codec compiled into the FFmpeg build
}
AVCodecContext *avCodecContext = avcodec_alloc_context3(avCodec);
if (avCodecContext == nullptr) {
    return -1; // allocation failure
}
// Copy width/height/pix_fmt/extradata etc. from the demuxer's parameters.
avcodec_parameters_to_context(avCodecContext, avFormatContext->streams[video_index]->codecpar);
if (avcodec_open2(avCodecContext, avCodec, nullptr) < 0) {
    return -1;
}
四 申请AVPacket和AVFrame存放解析前后视频数据
具体可参考FFMPEG中最关键的结构体之间的关系
// Allocate the packet (compressed input) and the frames (decoded pictures).
// av_packet_alloc() replaces the deprecated av_malloc + av_init_packet pair
// and guarantees every field of the packet is correctly initialised.
AVPacket *packet = av_packet_alloc();
AVFrame *frame = av_frame_alloc();      // decoder output, native pixel format
AVFrame *rgb_frame = av_frame_alloc();  // RGBA picture that is blitted to the window
// One contiguous RGBA buffer; av_image_fill_arrays only wires rgb_frame's
// data/linesize pointers into it — rgb_frame does NOT own this memory,
// so it must be released separately with av_free at shutdown.
auto *out_buffer = (uint8_t *) av_malloc(
        av_image_get_buffer_size(AV_PIX_FMT_RGBA, avCodecContext->width, avCodecContext->height,
                                 1));
av_image_fill_arrays(rgb_frame->data, rgb_frame->linesize, out_buffer, AV_PIX_FMT_RGBA,
                     avCodecContext->width, avCodecContext->height, 1);
五 配置ANativeWindow
顾名思义,用来展示视频数据。在java新建个native方法,传入surface,用于生成ANativeWindow,示例如下
// The window the decoder renders into; owned by this file, swapped whenever
// the Java layer hands us a new Surface.
ANativeWindow *nativeWindow;

// JNI entry point: adopt the given Surface as the render target.
void setVideoSurface(JNIEnv *env, jobject instance, jobject surface) {
    // Drop any window we are still holding before adopting the new surface,
    // otherwise the previous ANativeWindow reference would leak.
    if (nativeWindow != nullptr) {
        ANativeWindow_release(nativeWindow);
        nativeWindow = nullptr;
    }
    // Acquire a native window reference backed by the Java Surface.
    nativeWindow = ANativeWindow_fromSurface(env, surface);
}
六 解码
// Backing pixel buffer of the window while it is locked for drawing.
ANativeWindow_Buffer nativeWindowBuffer;
// Demultiplex packets and decode the video stream frame by frame.
// NOTE(review): swsContext is used below but its creation is not shown in this
// excerpt; it must have been built with sws_getContext(..., AV_PIX_FMT_RGBA, ...)
// so the output matches the WINDOW_FORMAT_RGBA_8888 surface — confirm in the full file.
while (av_read_frame(avFormatContext, packet) >= 0) {
    if (packet->stream_index == video_index) {
        avcodec_send_packet(avCodecContext, packet);
        // One packet can yield zero or more frames — drain the decoder fully
        // (the original read at most one frame per packet and could drop output).
        while (avcodec_receive_frame(avCodecContext, frame) == 0) {
            if (nativeWindow == nullptr) {
                continue; // no surface attached yet; keep decoding, skip drawing
            }
            // Size the window buffers to the video and force RGBA output.
            ANativeWindow_setBuffersGeometry(nativeWindow, avCodecContext->width,
                                             avCodecContext->height, WINDOW_FORMAT_RGBA_8888);
            // On lock failure just skip drawing this frame. The original did
            // `continue` out of the OUTER loop here, which skipped
            // av_packet_unref below and leaked every such packet.
            if (ANativeWindow_lock(nativeWindow, &nativeWindowBuffer, nullptr) == 0) {
                // Convert the decoded frame to RGBA into rgb_frame/out_buffer.
                sws_scale(swsContext, frame->data, frame->linesize, 0, frame->height,
                          rgb_frame->data, rgb_frame->linesize);
                auto *dst = static_cast<uint8_t *>(nativeWindowBuffer.bits);
                // Window row pitch in bytes (stride is in pixels, RGBA = 4 bytes).
                int destStride = nativeWindowBuffer.stride * 4;
                uint8_t *src = rgb_frame->data[0];
                // Source row pitch in bytes; may exceed width*4 due to alignment padding.
                int srcStride = rgb_frame->linesize[0];
                // Copy exactly the visible pixels per row. The original copied
                // srcStride bytes, overrunning the destination row whenever
                // srcStride > destStride.
                int rowBytes = avCodecContext->width * 4;
                for (int i = 0; i < avCodecContext->height; i++) {
                    memcpy(dst + i * destStride, src + i * srcStride, rowBytes);
                }
                ANativeWindow_unlockAndPost(nativeWindow);
                // Crude ~60 fps pacing; a real player would derive timing from frame PTS.
                usleep(1000 * 16);
            }
        }
    }
    // Always release the packet's buffers before reading the next one.
    av_packet_unref(packet);
}
七 释放资源
ANativeWindow_release(nativeWindow);
av_frame_free(&frame);
av_frame_free(&rgb_frame);
avcodec_close(avCodecContext);
avformat_free_context(avFormatContext);
env->ReleaseStringUTFChars(inputPath_, inputPath);
配套代码简单说明
头文件
#include <jni.h>
#include <string>
#include "native-lib.h"
#include <android/native_window_jni.h>
// FFmpeg is a plain-C library: wrap its headers in extern "C" so the C++
// compiler does not name-mangle the symbols, otherwise linking fails.
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"
#include "libswscale/swscale.h"
#include <unistd.h>
}
CMakeLists.txt加上android库的链接
target_link_libraries( # Specifies the target library.
native-lib
# FFmpeg component libraries — prebuilt .so files for the target ABI.
avcodec
avdevice
avfilter
avformat
avutil
postproc
swresample
swscale
# NDK library that provides the ANativeWindow_* APIs used for rendering.
android
# Links the target library to the log library
# included in the NDK.
${log-lib})
java层的代码就不贴了,就是写个surface,在surfaceChanged回调里把surface传进去,然后申请存储权限,在子线程里把视频路径传进去