前言
继续上一章的内容:因为USB摄像头不支持H.265,而H.265码流更小,所以先转成H.265再推流,然后再拉流保存到本地,并用OpenCV展示。
1:环境
跟上章一样,这里不介绍了 访问地址
2: 直接上代码
#define _CRT_SECURE_NO_WARNINGS
#include <iostream>
#include <string>
#include <memory>
#include <thread>
#include <chrono>
#include <ctime>
#include <limits>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
}
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "avdevice.lib")
#include <opencv2/opencv.hpp>
#pragma comment(lib, "opencv_core4110d.lib") //opencv_core4110d.lib
#pragma comment(lib, "opencv_imgcodecs4110d.lib")
#pragma comment(lib, "opencv_highgui4110d.lib")
#pragma comment(lib, "opencv_imgcodecs4110d.lib")
#pragma comment(lib, "opencv_video4110d.lib")
#pragma comment(lib, "opencv_videoio4110d.lib")
// Custom deleter so an AVCodecContext can be owned by std::shared_ptr /
// std::unique_ptr; avcodec_free_context both closes and frees the context.
struct AVCodecContextDeleter {
    void operator()(AVCodecContext* ctx) const {
        if (ctx == nullptr) {
            return;
        }
        avcodec_free_context(&ctx);
    }
};
void pullAndSave(const char* rtsp_url);
void pullAndSave2(const char* rstp_url);
// Thread function: pull the RTSP stream and remux it into MP4 files that are
// rotated roughly once per minute. Packets are copied as-is (no re-encode).
// NOTE(review): packet timestamps are never rebased when a new file is opened,
// so every file after the first starts at a non-zero offset — confirm players
// handle this, or rebase pts/dts per file.
void pullAndSaveData(const char* rtsp_url) {
AVFormatContext* input_format_context = nullptr;
if (avformat_open_input(&input_format_context, rtsp_url, nullptr, nullptr) != 0) {
std::cerr << "Could not open input RTSP stream" << std::endl;
return;
}
if (avformat_find_stream_info(input_format_context, nullptr) < 0) {
std::cerr << "Could not find stream information" << std::endl;
avformat_close_input(&input_format_context);
return;
}
// Locate the first video stream.
int video_stream_index = -1;
for (unsigned int i = 0; i < input_format_context->nb_streams; i++) {
if (input_format_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
break;
}
}
if (video_stream_index == -1) {
std::cerr << "Could not find video stream" << std::endl;
avformat_close_input(&input_format_context);
return;
}
auto start_time = std::chrono::system_clock::now();
int file_index = 0; // number of files rotated so far (only incremented, never read)
AVFormatContext* output_format_context = nullptr;
AVPacket* packet = av_packet_alloc();
int64_t total_bytes = 0; // total bytes pulled, reported at the end
while (true) {
// Rotate to a new output file once a minute (and on the first iteration,
// when output_format_context is still null).
auto current_time = std::chrono::system_clock::now();
auto elapsed_time = std::chrono::duration_cast<std::chrono::minutes>(current_time - start_time).count();
if (elapsed_time >= 1 || output_format_context == nullptr) {
if (output_format_context != nullptr) {
// Finalize the previous file before opening the next one.
if (av_write_trailer(output_format_context) < 0) {
std::cerr << "Error writing trailer to output file" << std::endl;
}
if (!(output_format_context->oformat->flags & AVFMT_NOFILE)) {
if (avio_closep(&output_format_context->pb) < 0) {
std::cerr << "Error closing output file" << std::endl;
}
}
avformat_free_context(output_format_context);
}
// Build a timestamped filename for the new segment.
std::time_t now = std::chrono::system_clock::to_time_t(current_time);
char filename[100];
std::tm tm_info;
localtime_s(&tm_info, &now);
std::strftime(filename, sizeof(filename), "output_%Y%m%d_%H%M%S.mp4", &tm_info);
avformat_alloc_output_context2(&output_format_context, nullptr, "mp4", filename);
if (!output_format_context) {
std::cerr << "Could not create output context" << std::endl;
break;
}
AVStream* output_stream = avformat_new_stream(output_format_context, nullptr);
if (!output_stream) {
std::cerr << "Failed allocating output stream" << std::endl;
avformat_free_context(output_format_context);
break;
}
// Copy codec parameters verbatim — this is a remux, not a transcode.
avcodec_parameters_copy(output_stream->codecpar, input_format_context->streams[video_stream_index]->codecpar);
if (!(output_format_context->oformat->flags & AVFMT_NOFILE)) {
if (avio_open(&output_format_context->pb, filename, AVIO_FLAG_WRITE) < 0) {
std::cerr << "Could not open output file" << std::endl;
avformat_free_context(output_format_context);
break;
}
}
// Mirror the input stream's time base so packet timestamps can be copied.
// NOTE(review): avformat_write_header may adjust output_stream->time_base;
// packets below are written without av_packet_rescale_ts — confirm the
// resulting files have correct timing.
output_stream->time_base = input_format_context->streams[video_stream_index]->time_base;
std::cout << "Output stream time base: " << output_stream->time_base.num << "/" << output_stream->time_base.den << std::endl;
if (avformat_write_header(output_format_context, nullptr) < 0) {
std::cerr << "Error occurred when opening output file" << std::endl;
if (!(output_format_context->oformat->flags & AVFMT_NOFILE)) {
if (avio_closep(&output_format_context->pb) < 0) {
std::cerr << "Error closing output file" << std::endl;
}
}
avformat_free_context(output_format_context);
break;
}
start_time = current_time;
file_index++;
}
int ret = av_read_frame(input_format_context, packet);
if (ret < 0) {
if (ret == AVERROR_EOF) {
std::cerr << "End of input stream reached" << std::endl;
}
else {
std::cerr << "Error reading frame from input stream: " << ret << std::endl;
}
break;
}
// Drop packets with a negative pts.
// NOTE(review): AV_NOPTS_VALUE is also negative, so packets without a pts
// are silently discarded here as well — confirm that is intended.
if (packet->pts < 0) {
std::cerr << "Invalid negative PTS in input packet: " << packet->pts << std::endl;
av_packet_unref(packet);
continue;
}
if (packet->stream_index == video_stream_index) {
total_bytes += packet->size; // accumulate pulled bytes for the final report
ret = av_write_frame(output_format_context, packet);
if (ret < 0) {
std::cerr << "Error writing frame to output file: " << ret << std::endl;
}
}
av_packet_unref(packet);
}
// Finalize the last open file and release all resources.
if (output_format_context != nullptr) {
if (av_write_trailer(output_format_context) < 0) {
std::cerr << "Error writing trailer to output file" << std::endl;
}
if (!(output_format_context->oformat->flags & AVFMT_NOFILE)) {
if (avio_closep(&output_format_context->pb) < 0) {
std::cerr << "Error closing output file" << std::endl;
}
}
avformat_free_context(output_format_context);
}
av_packet_free(&packet);
avformat_close_input(&input_format_context);
// Report how much data was pulled.
std::cout << "Total bytes pulled: " << total_bytes << " bytes" << std::endl;
if (total_bytes > 0) {
std::cout << "Successfully pulled the stream." << std::endl;
}
else {
std::cout << "No stream data was pulled." << std::endl;
}
}
int main() {
const char* rtsp_url = "rtsp://192.168.1.100:554/live/test1";
const char* device_name = "video=USB2.0 PC CAMERA";
// 初始化 FFmpeg 库
avformat_network_init();
avdevice_register_all();
// 启动拉取数据并保存为 MP4 文件的线程
// std::thread pull_thread(pullAndSaveData, rtsp_url);
//
std::thread pull_thread(pullAndSave2, rtsp_url);
// 打开 USB 摄像头(使用 dshow)
AVFormatContext* input_format_context = nullptr;
AVDictionary* input_options = nullptr;
AVInputFormat* input_format = av_find_input_format("dshow");
if (!input_format) {
std::cerr << "Could not find dshow input format" << std::endl;
return -1;
}
// 设置 rtbufsize 参数,增加缓冲区大小
av_dict_set(&input_options, "rtbufsize", "100000000", 0); // 设置为 100MB
if (avformat_open_input(&input_format_context, device_name, input_format, &input_options) != 0) {
std::cerr << "Could not open USB camera" << std::endl;
return -1;
}
// 读取输入流信息
if (avformat_find_stream_info(input_format_context, nullptr) < 0) {
std::cerr << "Could not find stream information" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 查找视频流
int video_stream_index = -1;
for (unsigned int i = 0; i < input_format_context->nb_streams; i++) {
if (input_format_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
break;
}
}
if (video_stream_index == -1) {
std::cerr << "Could not find video stream" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 获取输入视频流的编解码器参数
AVCodecParameters* input_codec_parameters = input_format_context->streams[video_stream_index]->codecpar;
AVCodecID input_codec_id = input_codec_parameters->codec_id;
// 查找输入视频流的解码器
AVCodec* input_codec = avcodec_find_decoder(input_codec_id); codec = avcodec_find_decoder_by_name("libx265");
if (!input_codec) {
std::cerr << "Could not find input codec" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 使用 std::shared_ptr 管理解码器上下文
std::shared_ptr<AVCodecContext> input_codec_context(
avcodec_alloc_context3(input_codec),
AVCodecContextDeleter()
);
if (!input_codec_context) {
std::cerr << "Could not allocate input codec context" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 将编解码器参数复制到解码器上下文
if (avcodec_parameters_to_context(input_codec_context.get(), input_codec_parameters) < 0) {
std::cerr << "Could not copy input codec parameters to codec context" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 打开输入解码器
if (avcodec_open2(input_codec_context.get(), input_codec, nullptr) < 0) {
std::cerr << "Could not open input codec" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 查找 H.265 编码器
AVCodec* h265_codec = avcodec_find_encoder(AV_CODEC_ID_HEVC);
if (!h265_codec) {
std::cerr << "Could not find H.265 encoder" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 使用 std::shared_ptr 管理 H.265 编码器上下文
std::shared_ptr<AVCodecContext> h265_codec_context(
avcodec_alloc_context3(h265_codec),
AVCodecContextDeleter()
);
if (!h265_codec_context) {
std::cerr << "Could not allocate H.265 encoder context" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 配置 H.265 编码器上下文
h265_codec_context->codec_id = AV_CODEC_ID_HEVC;
h265_codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
h265_codec_context->pix_fmt = AV_PIX_FMT_YUV420P;
h265_codec_context->width = input_codec_context->width;
h265_codec_context->height = input_codec_context->height;
h265_codec_context->time_base = {
1, 30 }; // 帧率 30fps
h265_codec_context->framerate = {
30, 1 };
h265_codec_context->bit_rate = 1000000; // 设置码率为 1Mbps
// 打开 H.265 编码器
if (avcodec_open2(h265_codec_context.get(), h265_codec, nullptr) < 0) {
std::cerr << "Could not open H.265 encoder" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 分配输出格式上下文
AVFormatContext* output_format_context = nullptr;
avformat_alloc_output_context2(&output_format_context, nullptr, "rtsp", rtsp_url);
if (!output_format_context) {
std::cerr << "Could not create output context" << std::endl;
avformat_close_input(&input_format_context);
return -1;
}
// 创建输出流
AVStream* output_stream = avformat_new_stream(output_format_context, h265_codec);
if (!output_stream) {
std::cerr << "Failed allocating output stream" << std::endl;
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
output_stream->time_base = h265_codec_context->time_base;
if (avcodec_parameters_from_context(output_stream->codecpar, h265_codec_context.get()) < 0) {
std::cerr << "Could not copy output codec parameters to stream" << std::endl;
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
// 打开输出 URL
if (!(output_format_context->oformat->flags & AVFMT_NOFILE)) {
if (avio_open(&output_format_context->pb, rtsp_url, AVIO_FLAG_WRITE) < 0) {
std::cerr << "Could not open output URL" << std::endl;
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
}
// 写入文件头
if (avformat_write_header(output_format_context, nullptr) < 0) {
std::cerr << "Error occurred when opening output URL" << std::endl;
if (output_format_context && !(output_format_context->oformat->flags & AVFMT_NOFILE)) {
if (avio_closep(&output_format_context->pb) < 0) {
std::cerr << "Error closing output file" << std::endl;
}
}
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
// 分配解码帧和编码帧
AVFrame* decoded_frame = av_frame_alloc();
AVFrame* encoded_frame = av_frame_alloc();
if (!decoded_frame || !encoded_frame) {
std::cerr << "Could not allocate frames" << std::endl;
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
encoded_frame->format = h265_codec_context->pix_fmt;
encoded_frame->width = h265_codec_context->width;
encoded_frame->height = h265_codec_context->height;
if (av_frame_get_buffer(encoded_frame, 0) < 0) {
std::cerr << "Could not allocate frame buffer" << std::endl;
av_frame_free(&decoded_frame);
av_frame_free(&encoded_frame);
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
// 分配图像转换上下文
SwsContext* sws_context = sws_getContext(
input_codec_context->width, input_codec_context->height, input_codec_context->pix_fmt,
h265_codec_context->width, h265_codec_context->height, h265_codec_context->pix_fmt,
SWS_BILINEAR, nullptr, nullptr, nullptr
);
if (!sws_context) {
std::cerr << "Could not initialize sws context" << std::endl;
av_frame_free(&decoded_frame);
av_frame_free(&encoded_frame);
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
// 分配数据包
AVPacket* input_packet = av_packet_alloc();
AVPacket* output_packet = av_packet_alloc();
if (!input_packet || !output_packet) {
std::cerr << "Could not allocate packets" << std::endl;
av_frame_free(&decoded_frame);
av_frame_free(&encoded_frame);
sws_freeContext(sws_context);
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
return -1;
}
// 记录上一个时间戳
int64_t last_pts = -1;
int64_t base_pts = 0;
bool first_frame = true;
int64_t max_pts_jump = 1000; // 允许的最大时间戳跳跃
// 读取输入流,解码,编码并上传
while (av_read_frame(input_format_context, input_packet) >= 0) {
if (input_packet->stream_index == video_stream_index) {
// 发送数据包到解码器
if (avcodec_send_packet(input_codec_context.get(), input_packet) < 0) {
std::cerr << "Error sending packet to decoder" << std::endl;
continue;
}
// 从解码器接收帧
while (avcodec_receive_frame(input_codec_context.get(), decoded_frame) == 0) {
// 转换像素格式
sws_scale(
sws_context,
(const uint8_t* const*)decoded_frame->data, decoded_frame->linesize,
0, input_codec_context->height,
encoded_frame->data, encoded_frame->linesize
);
if (first_frame) {
base_pts = decoded_frame->pts;
first_frame = false;
}
// 处理时间戳,确保单调递增
int64_t current_pts = decoded_frame->pts - base_pts;
if (last_pts != -1) {
if (current_pts < last_pts) {
// 时间戳递减,进行修正
std::cerr << "Timestamp decreased, correcting: " << last_pts << " -> " << current_pts << std::endl;
// 确保修正后的时间戳不会产生溢出
current_pts = std::max(last_pts + 1, static_cast<int64_t>(0));
}
else if (current_pts - last_pts > max_pts_jump) {
// 时间戳跳跃过大,进行平滑处理
// std::cerr << "Timestamp jump too large, smoothing: " << last_pts << " -> " << current_pts << std::endl;
// 确保平滑后的时间戳不会产生溢出
current_pts = std::min(last_pts + 1, static_cast<int64_t>(std::numeric_limits<int64_t>::max()));
}
}
encoded_frame->pts = current_pts;
last_pts = current_pts;
// 发送帧到 H.265 编码器
if (avcodec_send_frame(h265_codec_context.get(), encoded_frame) < 0) {
std::cerr << "Error sending frame to H.265 encoder" << std::endl;
continue;
}
// 从 H.265 编码器接收数据包
while (avcodec_receive_packet(h265_codec_context.get(), output_packet) == 0) {
// 调整时间戳
av_packet_rescale_ts(output_packet, h265_codec_context->time_base, output_stream->time_base);
output_packet->stream_index = output_stream->index;
// 写入数据包到输出文件
if (av_interleaved_write_frame(output_format_context, output_packet) < 0) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
av_strerror(av_interleaved_write_frame(output_format_context, output_packet), errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cerr << "Error writing packet to output file: " << errbuf << std::endl;
}
av_packet_unref(output_packet);
}
}
}
av_packet_unref(input_packet);
}
// 刷新编码器
avcodec_send_frame(h265_codec_context.get(), nullptr);
while (avcodec_receive_packet(h265_codec_context.get(), output_packet) == 0) {
av_packet_rescale_ts(output_packet, h265_codec_context->time_base, output_stream->time_base);
output_packet->stream_index = output_stream->index;
if (av_interleaved_write_frame(output_format_context, output_packet) < 0) {
char errbuf[AV_ERROR_MAX_STRING_SIZE];
av_strerror(av_interleaved_write_frame(output_format_context, output_packet), errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cerr << "Error writing packet to output file: " << errbuf << std::endl;
}
av_packet_unref(output_packet);
}
// 写入文件尾
av_write_trailer(output_format_context);
// 释放资源
av_packet_free(&input_packet);
av_packet_free(&output_packet);
av_frame_free(&decoded_frame);
av_frame_free(&encoded_frame);
sws_freeContext(sws_context);
if (output_format_context && !(output_format_context->oformat->flags & AVFMT_NOFILE)) {
if (avio_closep(&output_format_context->pb) < 0) {
std::cerr << "Error closing output file" << std::endl;
}
}
avformat_free_context(output_format_context);
avformat_close_input(&input_format_context);
// 等待拉取线程结束
pull_thread.join();
return 0;
}
// Pull the RTSP stream with cv::VideoCapture, show it in a window and save it
// to MP4 files rotated roughly once per minute. Currently unused: main()
// starts pullAndSave2 instead, because VideoCapture could not open the H.265
// RTSP stream directly on the test setup.
void pullAndSave(const char* rtsp_url) {
    cv::VideoCapture cap;
    // Try the default backend first, then explicitly retry with FFmpeg.
    if (!cap.open(rtsp_url, cv::CAP_ANY)) {
        std::cerr << "Could not open video stream: " << rtsp_url << std::endl;
        if (!cap.open(rtsp_url, cv::CAP_FFMPEG)) {
            std::cerr << "Failed to open video stream using FFmpeg backend." << std::endl;
            return;
        }
    }
    // BUG FIX: these properties were set BEFORE open(), where they are ignored;
    // they must be applied to an opened capture. (Whether the backend honors
    // CAP_PROP_FOURCC for a network stream is backend-dependent — verify.)
    cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('H', '2', '6', '5'));
    cap.set(cv::CAP_PROP_BUFFERSIZE, 3);
    cv::Mat frame;
    int frame_width = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
    int frame_height = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
    double fps = cap.get(cv::CAP_PROP_FPS);
    if (fps <= 0.0) {
        fps = 25.0; // BUG FIX: CAP_PROP_FPS is often 0 for RTSP; VideoWriter needs a positive rate
    }
    int frame_count = 0;
    cv::VideoWriter writer;
    auto start_time = std::chrono::system_clock::now();
    while (true) {
        cap >> frame;
        if (frame.empty()) {
            std::cerr << "Received an empty frame. Trying to reconnect..." << std::endl;
            // Attempt a reconnect through the FFmpeg backend.
            if (!cap.open(rtsp_url, cv::CAP_FFMPEG)) {
                std::cerr << "Failed to reconnect to the video stream." << std::endl;
                break;
            }
            continue;
        }
        // Rotate the output file once a minute (and open the first one immediately).
        auto current_time = std::chrono::system_clock::now();
        auto elapsed_time = std::chrono::duration_cast<std::chrono::minutes>(current_time - start_time).count();
        if (elapsed_time > 0 || frame_count == 0) {
            if (writer.isOpened()) {
                writer.release();
            }
            start_time = current_time;
            std::time_t now = std::chrono::system_clock::to_time_t(start_time);
            std::tm* local_time = std::localtime(&now);
            std::ostringstream oss;
            oss << "output_" << std::put_time(local_time, "%Y%m%d%H%M%S") << ".mp4";
            std::string filename = oss.str();
            // NOTE(review): fourcc 'H265' in an .mp4 container may be rejected by
            // some OpenCV/FFmpeg builds — verify files are actually written.
            writer.open(filename, cv::VideoWriter::fourcc('H', '2', '6', '5'), fps, cv::Size(frame_width, frame_height));
            if (!writer.isOpened()) {
                std::cerr << "Could not open VideoWriter for " << filename << std::endl;
            }
        }
        writer.write(frame); // no-op if the writer failed to open
        cv::imshow("Video Stream", frame);
        if (cv::waitKey(1) == 27) { // ESC quits
            break;
        }
        frame_count++;
    }
    if (writer.isOpened()) {
        writer.release();
    }
    cap.release();
    cv::destroyAllWindows();
}
// Pull the RTSP stream with the FFmpeg API (cv::VideoCapture could not open it
// directly), decode, convert to BGR24, display with OpenCV and save to MP4
// files rotated roughly once per minute. This is the puller started by main().
void pullAndSave2(const char* rstp_url) {
    AVFormatContext* format_context = nullptr;
    if (avformat_open_input(&format_context, rstp_url, nullptr, nullptr) != 0) {
        std::cerr << "Could not open input stream" << std::endl;
        return;
    }
    if (avformat_find_stream_info(format_context, nullptr) < 0) {
        std::cerr << "Could not find stream information" << std::endl;
        avformat_close_input(&format_context);
        return;
    }
    // Locate the first video stream.
    int video_stream_index = -1;
    for (unsigned int i = 0; i < format_context->nb_streams; i++) {
        if (format_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            break;
        }
    }
    if (video_stream_index == -1) {
        std::cerr << "Could not find video stream" << std::endl;
        avformat_close_input(&format_context);
        return;
    }
    // Set up the decoder for the stream's codec.
    AVCodecParameters* codec_parameters = format_context->streams[video_stream_index]->codecpar;
    AVCodec* codec = avcodec_find_decoder(codec_parameters->codec_id);
    if (!codec) {
        std::cerr << "Codec not found" << std::endl;
        avformat_close_input(&format_context);
        return;
    }
    std::shared_ptr<AVCodecContext> codec_context(avcodec_alloc_context3(codec), AVCodecContextDeleter());
    // BUG FIX: an allocation failure was previously dereferenced unchecked.
    if (!codec_context) {
        std::cerr << "Could not allocate codec context" << std::endl;
        avformat_close_input(&format_context);
        return;
    }
    if (avcodec_parameters_to_context(codec_context.get(), codec_parameters) < 0) {
        std::cerr << "Could not copy codec parameters to codec context" << std::endl;
        avformat_close_input(&format_context);
        return;
    }
    if (avcodec_open2(codec_context.get(), codec, nullptr) < 0) {
        std::cerr << "Could not open codec" << std::endl;
        avformat_close_input(&format_context);
        return;
    }
    AVFrame* frame = av_frame_alloc();
    // BUG FIX: use a heap AVPacket — the original used an uninitialized stack
    // AVPacket, which is deprecated and unsafe with av_read_frame/av_packet_unref.
    AVPacket* packet = av_packet_alloc();
    if (!frame || !packet) {
        std::cerr << "Could not allocate frame/packet" << std::endl;
        av_frame_free(&frame);
        av_packet_free(&packet);
        avformat_close_input(&format_context);
        return;
    }
    cv::VideoWriter writer;
    int frame_width = codec_context->width;
    int frame_height = codec_context->height;
    double fps = av_q2d(format_context->streams[video_stream_index]->r_frame_rate);
    if (fps <= 0.0) {
        fps = 25.0; // BUG FIX: r_frame_rate can be 0/unknown for RTSP; VideoWriter needs a positive rate
    }
    int frame_count = 0;
    auto start_time = std::chrono::system_clock::now();
    // Converter: decoded pixel format -> BGR24 for OpenCV.
    SwsContext* sws_ctx = sws_getContext(codec_context->width, codec_context->height,
        codec_context->pix_fmt,
        codec_context->width, codec_context->height,
        AV_PIX_FMT_BGR24, SWS_BILINEAR, nullptr, nullptr, nullptr);
    if (!sws_ctx) {
        std::cerr << "Could not initialize sws context" << std::endl;
        av_frame_free(&frame);
        av_packet_free(&packet);
        avformat_close_input(&format_context);
        return;
    }
    cv::Mat cv_frame(frame_height, frame_width, CV_8UC3);
    bool stop_requested = false; // set when the user presses ESC
    while (!stop_requested && av_read_frame(format_context, packet) >= 0) {
        if (packet->stream_index == video_stream_index) {
            if (avcodec_send_packet(codec_context.get(), packet) < 0) {
                std::cerr << "Error sending packet to decoder" << std::endl;
                av_packet_unref(packet); // BUG FIX: `continue` used to skip the unref and leak
                continue;
            }
            while (avcodec_receive_frame(codec_context.get(), frame) == 0) {
                // Skip frames whose size differs from the sws/cv buffers.
                if (frame->width != frame_width || frame->height != frame_height) {
                    std::cerr << "Frame size mismatch! Expected: " << frame_width << "x" << frame_height
                        << ", Actual: " << frame->width << "x" << frame->height << std::endl;
                    continue;
                }
                uint8_t* dst_data[4] = { cv_frame.data, nullptr, nullptr, nullptr };
                int dst_linesize[4] = { static_cast<int>(cv_frame.step), 0, 0, 0 };
                sws_scale(sws_ctx, frame->data, frame->linesize, 0, frame_height,
                    dst_data, dst_linesize);
                // Rotate the output file once a minute (and open the first one immediately).
                auto current_time = std::chrono::system_clock::now();
                auto elapsed_time = std::chrono::duration_cast<std::chrono::minutes>(current_time - start_time).count();
                if (elapsed_time > 0 || frame_count == 0) {
                    if (writer.isOpened()) {
                        writer.release();
                    }
                    start_time = current_time;
                    std::time_t now = std::chrono::system_clock::to_time_t(start_time);
                    std::tm* local_time = std::localtime(&now);
                    std::ostringstream oss;
                    oss << "output_" << std::put_time(local_time, "%Y%m%d%H%M%S") << ".mp4";
                    std::string filename = oss.str();
                    // NOTE(review): fourcc 'H265' in .mp4 may be rejected by some
                    // OpenCV/FFmpeg builds — verify the files are written.
                    writer.open(filename, cv::VideoWriter::fourcc('H', '2', '6', '5'), fps, cv::Size(frame_width, frame_height));
                    if (!writer.isOpened()) {
                        std::cerr << "Could not open VideoWriter for " << filename << std::endl;
                    }
                }
                writer.write(cv_frame);
                cv::imshow("Video Stream", cv_frame);
                if (cv::waitKey(1) == 27) {
                    // BUG FIX: a bare `break` only left the decode loop; the outer
                    // read loop kept running, so ESC never actually stopped the pull.
                    stop_requested = true;
                    break;
                }
                frame_count++;
            }
        }
        av_packet_unref(packet);
    }
    if (writer.isOpened()) {
        writer.release();
    }
    av_frame_free(&frame);
    av_packet_free(&packet);
    sws_freeContext(sws_ctx);
    avformat_close_input(&format_context);
    cv::destroyAllWindows();
}
3:代码说明
代码是叫 AI 写的,再自己修正,改到完全能用也费时间,不过比自己从头写起要轻松许多。
1>av_find_input_format(“dshow”) 不用dshow 找不到 usb摄像头
2>av_dict_set(&input_options, “rtbufsize”, “100000000”, 0); // 设置为 100MB //不设置容易缓存容易满,按需求设置大小
3>配置 H.265 编码器 按需修改
4> 把输入的格式 转为 h265再输出
// 分配图像转换上下文
SwsContext* sws_context = sws_getContext(
input_codec_context->width, input_codec_context->height, input_codec_context->pix_fmt,
h265_codec_context->width, h265_codec_context->height, h265_codec_context->pix_fmt,
SWS_BILINEAR, nullptr, nullptr, nullptr
);
5>查看当前计算机上连接的usb摄像头:
ffmpeg -list_devices true -f dshow -i dummy
4:运行测试
就是把USB摄像头的流转为H.265上传到媒体服务器,再从媒体服务器拉下来,保存到本地,同时用OpenCV播放。
注意,直接用 cv::VideoCapture 打不开,所以用ffmpeg拉下来再给OpenCV播放;
cv::VideoCapture cap;
// 设置 RTSP 流的参数,提高兼容性
cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('H', '2', '6', '5'));
cap.set(cv::CAP_PROP_BUFFERSIZE, 3);
// 尝试打开 RTSP 流
if (!cap.open(rstp_url, cv::CAP_ANY)) {
std::cerr << "Could not open video stream: " << rstp_url << std::endl;
// 再次尝试使用 FFmpeg 后端打开
if (!cap.open(rstp_url, cv::CAP_FFMPEG)) {
std::cerr << "Failed to open video stream using FFmpeg backend." << std::endl;
return;
}
}
左边是拉流给cv播放的, 右边是 VLC media player 拉流播放的
5:如果对你有帮助,麻烦点个赞,加个关注
下章android 编译及测试