Image-to-video RTMP streaming source code, tested and working
I was stuck on the parameter settings for a long time and could not get them right no matter what I tried; today Cursor fixed them and the program now streams normally, so I am recording the code here for future use.
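The program depends on FFmpeg (libavformat, libavcodec, libavutil), OpenCV, and libyuv. When parameter or API problems are hard to pin down, it can help to first confirm which FFmpeg build is actually being linked; a minimal probe like the sketch below does that (this is only a sanity-check sketch, assuming the same include/library setup as the streaming program):

extern "C" {
#include <libavutil/avutil.h>
}
#include <cstdio>

int main() {
    // Print the version string of the FFmpeg build we linked against
    std::printf("FFmpeg: %s\n", av_version_info());
    return 0;
}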

/**
 * Image streaming program
 * Pushes a single image to an RTMP server at a fixed frame rate,
 * which is useful for testing or simulating a live video feed.
 */
#include <iostream>
#include <string>
#include <cstring>   // memset
#include <ctime>     // time
#include <chrono>
#include <thread>
#include <opencv2/opencv.hpp>

extern "C" {
#include <libavformat/avformat.h>   // FFmpeg container/muxing library
#include <libavcodec/avcodec.h>     // FFmpeg codec library
#include <libavutil/avutil.h>       // FFmpeg utility library
#include <libavutil/imgutils.h>     // FFmpeg image helpers
#include "libyuv.h"                 // libyuv pixel-format conversion
}
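// The FFmpeg headers are plain C, so they must be wrapped in extern "C";
// otherwise the C++ compiler mangles the symbol names and linking fails.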
// Output frame rate
const int FRAME_RATE = 25;
// Total streaming duration in seconds
const int DURATION_SECONDS = 10 * 60; // 10 minutes
/**
 * Audio/video helper class
 * Provides image format conversion and FFmpeg error-string helpers.
 */
class AVUtils {
public:
    /**
     * Convert an OpenCV image to an FFmpeg AVFrame in YUV420P format.
     * Note: despite the parameter name, cv::imread returns BGR byte order,
     * which is exactly what libyuv::RGB24ToI420 expects (libyuv's "RGB24"
     * means B,G,R in memory), so no cv::cvtColor is needed.
     * @param rgbImage 8-bit, 3-channel OpenCV Mat
     * @return the converted AVFrame, or nullptr on failure (caller frees)
     */
    static AVFrame *CVMatToAVFrame(cv::Mat &rgbImage) {
        // Validate the input image
        if (rgbImage.empty() || rgbImage.channels() != 3 ||
            rgbImage.depth() != CV_8U) {
            return nullptr;
        }
        // Allocate the frame structure
        AVFrame *pYuvFrame = av_frame_alloc();
        if (pYuvFrame == nullptr) {
            return nullptr;
        }
        // Basic frame parameters
        pYuvFrame->width = rgbImage.cols;
        pYuvFrame->height = rgbImage.rows;
        pYuvFrame->format = AV_PIX_FMT_YUV420P;
        // Allocate the frame buffers
        int ret = av_frame_get_buffer(pYuvFrame, 0);
        if (ret < 0) {
            av_frame_free(&pYuvFrame);
            std::cerr << "AVUtils::CVMatToAVFrame: could not allocate the video frame data!" << std::endl;
            return nullptr;
        }
        // Convert BGR to YUV420P with libyuv
        ret = libyuv::RGB24ToI420(rgbImage.data,
                                  rgbImage.step,
                                  pYuvFrame->data[0], // Y plane
                                  pYuvFrame->linesize[0],
                                  pYuvFrame->data[1], // U plane
                                  pYuvFrame->linesize[1],
                                  pYuvFrame->data[2], // V plane
                                  pYuvFrame->linesize[2],
                                  rgbImage.cols,
                                  rgbImage.rows);
        if (ret != 0) {
            av_frame_free(&pYuvFrame);
            std::cerr << "AVUtils::CVMatToAVFrame: libyuv::RGB24ToI420 conversion failed!" << std::endl;
            return nullptr;
        }
        return pYuvFrame;
    }

    /**
     * Turn an FFmpeg error code into a readable message.
     * Note: uses a static buffer, so this is not thread-safe.
     * @param err FFmpeg error code
     * @return pointer to a static error-description string
     */
    static char *AVErr2str(int err) {
        static char str[AV_ERROR_MAX_STRING_SIZE];
        memset(str, 0, sizeof(str));
        return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, err);
    }
};
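/*
 * Usage sketch for the converter (a hypothetical standalone test, not part
 * of the program below):
 *   cv::Mat img = cv::imread("test.jpg");       // 8-bit BGR
 *   AVFrame *f = AVUtils::CVMatToAVFrame(img);  // caller owns the frame
 *   if (f) av_frame_free(&f);
 * A 1920x1080 YUV420P frame holds one 1920x1080 Y plane plus 960x540
 * U and V planes, i.e. width*height*3/2 = 3,110,400 bytes of pixel data.
 */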
int main(int argc, char *argv[]) {
    int ret = -1;
    // Input image path
    const char *inputPath = "../images/5.jpg";
    // RTMP publishing URL
    std::string rtmpUrl = "rtmp://192.168.3.240:1935/live/DOCK2024000003/165-0-7/normal-2";
    // Timestamped MP4 file name (declared here but not used below)
    std::string mp4Url = std::to_string(time(nullptr)) + ".mp4";
    // Initialize FFmpeg networking
    avformat_network_init();
    // Load the input image with OpenCV
    cv::Mat image = cv::imread(inputPath);
    if (image.empty()) {
        std::cerr << "Could not read input image" << std::endl;
        return EXIT_FAILURE;
    }
    // Scale the image to 1920x1080
    cv::resize(image, image, cv::Size(1920, 1080));
    if (image.channels() != 3) {
        std::cerr << "Input image should be an 8-bit 3-channel BGR image." << std::endl;
        return EXIT_FAILURE;
    }
    int width = image.cols;
    int height = image.rows;
    // Set up the output format context
    AVFormatContext *outCtx = nullptr;
    const AVOutputFormat *outputFormat = av_guess_format("flv", nullptr, nullptr);
    if (!outputFormat) {
        std::cerr << "Could not guess output format" << std::endl;
        return EXIT_FAILURE;
    }
    // Create the output context (RTMP carries an FLV stream)
    ret = avformat_alloc_output_context2(&outCtx, outputFormat, nullptr, rtmpUrl.c_str());
    if (ret < 0 || !outCtx) {
        std::cerr << "Could not allocate RTMP output context!" << std::endl;
        return EXIT_FAILURE;
    }
    // Find an H.264 encoder, preferring libx264
    const AVCodec *outCodec = avcodec_find_encoder_by_name("libx264");
    if (outCodec == nullptr) {
        std::cerr << "libx264 not found, falling back to the default H.264 encoder" << std::endl;
        outCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    }
    if (outCodec == nullptr) {
        std::cerr << "No H.264 encoder available" << std::endl;
        return EXIT_FAILURE;
    }
    // Create the encoder context
    AVCodecContext *outCodecCtx = avcodec_alloc_context3(outCodec);
    if (!outCodecCtx) {
        std::cerr << "Could not allocate codec context" << std::endl;
        return EXIT_FAILURE;
    }
    // Configure the encoder
    outCodecCtx->codec_id = AV_CODEC_ID_H264;           // H.264 encoding
    outCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;          // YUV420P pixel format
    outCodecCtx->width = width;                         // video width
    outCodecCtx->height = height;                       // video height
    outCodecCtx->time_base = AVRational{1, FRAME_RATE}; // time base matches the frame rate
    outCodecCtx->framerate = AVRational{FRAME_RATE, 1}; // frame rate
    outCodecCtx->bit_rate = 2000000;                    // 2 Mbit/s
    outCodecCtx->gop_size = FRAME_RATE * 2;             // one keyframe every 2 seconds
    outCodecCtx->max_b_frames = 0;                      // no B-frames (lower latency)
    outCodecCtx->thread_count = 4;                      // encoder threads
    outCodecCtx->thread_type = FF_THREAD_FRAME;         // frame-level threading
    if (outCtx->oformat->flags & AVFMT_GLOBALHEADER) {  // FLV requires global headers
        outCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
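    // Timing note: with time_base = {1, 25}, one PTS tick equals 1/25 s,
    // so feeding pts = 0, 1, 2, ... yields exactly FRAME_RATE frames per
    // second of stream time; framerate is the reciprocal hint for the encoder.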
    // x264-specific options
    AVDictionary *options = nullptr;
    av_dict_set(&options, "preset", "ultrafast", 0);   // fastest encoding, lowest compression efficiency
    av_dict_set(&options, "tune", "zerolatency", 0);   // minimize encoder latency
    // av_dict_set(&options, "profile", "baseline", 0); // optional: baseline profile
    // av_dict_set(&options, "crf", "23", 0);           // optional: constant-quality mode
    // Open the encoder
    if (avcodec_open2(outCodecCtx, outCodec, &options) < 0) {
        std::cerr << "Could not open codec" << std::endl;
        return EXIT_FAILURE;
    }
    av_dict_free(&options);
    // Create the output stream
    AVStream *outStream = avformat_new_stream(outCtx, outCodec);
    if (!outStream) {
        std::cerr << "Could not create new stream" << std::endl;
        return EXIT_FAILURE;
    }
    // Stream timing
    outStream->time_base = outCodecCtx->time_base;
    outStream->avg_frame_rate = outCodecCtx->framerate;
    // Copy codec parameters from the encoder context into the stream
    ret = avcodec_parameters_from_context(outStream->codecpar, outCodecCtx);
    if (ret < 0) {
        std::cerr << "Failed to copy codec parameters to stream" << std::endl;
        return EXIT_FAILURE;
    }
    // Configure the RTMP connection
    if (!(outCtx->oformat->flags & AVFMT_NOFILE)) {
        AVDictionary *opts = nullptr;
        // Transport options (timeout values are in microseconds)
        int maxDelay = 1000000;            // max delay: 1 second
        int rwTimeout = 30 * 1000 * 1000;  // read/write timeout: 30 seconds
        int listenTime = 10 * 1000 * 1000; // listen timeout: 10 seconds
        av_dict_set_int(&opts, "max_delay", maxDelay, 0);
        av_dict_set_int(&opts, "rw_timeout", rwTimeout, 0);
        av_dict_set_int(&opts, "listen_time", listenTime, 0);
        av_dict_set_int(&opts, "buffer_size", 1024 * 1024, 0); // 1 MiB I/O buffer
        // Open the RTMP output URL
        ret = avio_open2(&outCtx->pb, rtmpUrl.c_str(), AVIO_FLAG_WRITE, nullptr, &opts);
        av_dict_free(&opts); // safe even when opts is already null
        if (ret < 0) {
            std::cerr << "Could not open output URL: " << AVUtils::AVErr2str(ret) << std::endl;
            return EXIT_FAILURE;
        }
    }
    // Write the stream header
    if (avformat_write_header(outCtx, nullptr) < 0) {
        std::cerr << "Error writing header" << std::endl;
        return EXIT_FAILURE;
    }
    // Total number of frames to push: 25 fps * 600 s = 15000 frames
    int frameCount = FRAME_RATE * DURATION_SECONDS;
    // Main loop: push the same image frame after frame
    for (int i = 0; i < frameCount; ++i) {
        // Convert the image to an AVFrame (the same image is reconverted on
        // every iteration; hoisting the conversion out of the loop would also work)
        AVFrame *outFrame = AVUtils::CVMatToAVFrame(image);
        if (!outFrame) {
            std::cerr << "Failed to convert cv::Mat to AVFrame." << std::endl;
            return EXIT_FAILURE;
        }
        // Allocate a packet for the encoder output
        AVPacket *outPacket = av_packet_alloc();
        if (!outPacket) {
            std::cerr << "Could not allocate packet" << std::endl;
            av_frame_free(&outFrame);
            return EXIT_FAILURE;
        }
        // Presentation timestamp in codec time_base units (1 tick per frame);
        // the encoder derives DTS and duration from the time base
        outFrame->pts = i;
        // Send the frame to the encoder
        ret = avcodec_send_frame(outCodecCtx, outFrame);
        if (ret < 0) {
            std::cerr << "Error sending frame to encoder: " << AVUtils::AVErr2str(ret) << std::endl;
            av_packet_free(&outPacket);
            av_frame_free(&outFrame);
            return EXIT_FAILURE;
        }
        // Drain every packet the encoder has ready
        while (ret >= 0) {
            ret = avcodec_receive_packet(outCodecCtx, outPacket);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                break;
            } else if (ret < 0) {
                std::cerr << "Error receiving packet from encoder: " << AVUtils::AVErr2str(ret) << std::endl;
                break;
            }
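            // av_rescale_q converts a tick count between time bases. The FLV
            // muxer rewrites the stream time base to {1, 1000} (milliseconds)
            // during avformat_write_header, so frame i maps to
            // pts = i * (1/25) / (1/1000) = i * 40 ms, and a duration of one
            // codec tick becomes 40 ms.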
            // Rescale timestamps from codec time base to stream time base
            outPacket->pts = av_rescale_q(outPacket->pts, outCodecCtx->time_base, outStream->time_base);
            outPacket->dts = av_rescale_q(outPacket->dts, outCodecCtx->time_base, outStream->time_base);
            outPacket->duration = av_rescale_q(1, outCodecCtx->time_base, outStream->time_base);
            outPacket->stream_index = outStream->index;
            // Write the packet to the output
            ret = av_write_frame(outCtx, outPacket);
            if (ret < 0) {
                std::cerr << "Error writing packet: " << AVUtils::AVErr2str(ret) << std::endl;
                break;
            }
            av_packet_unref(outPacket);
        }
        // Release per-frame resources
        av_packet_free(&outPacket);
        av_frame_free(&outFrame);
        // Pace the loop to roughly FRAME_RATE; without this the program
        // pushes frames much faster than real time
        std::this_thread::sleep_for(std::chrono::milliseconds(1000 / FRAME_RATE));
    }
    // Write the stream trailer
    av_write_trailer(outCtx);
    // Release everything
    avcodec_free_context(&outCodecCtx);
    if (!(outCtx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&outCtx->pb);
    }
    avformat_free_context(outCtx);
    return 0;
}
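Once the program is running, the stream can be checked with any RTMP-capable player, for example by opening the same URL with ffplay (ffplay "rtmp://192.168.3.240:1935/live/DOCK2024000003/165-0-7/normal-2") or with VLC.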