#include <chrono>
#include <thread>
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
}
#define INPUT_URL "rtsp://admin:abc12345@192.168.1.172:554/H.264/ch1/main/av_stream"
#define OUTPUT_URL "rtmp://192.168.3.240:1935/live/DOCK2024000003/165-0-7/normal-3"
#define TARGET_FPS 25
#define H264_PRESET "fast" // x264 encoder preset (speed/quality trade-off)
/**
* 错误码转换打印
* @param err
* @return
*/
char *AvErr2str(int err) {
static char str[AV_ERROR_MAX_STRING_SIZE];
memset(str, 0, sizeof(str));
return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, err);
}
int main() {
int ret = 0;
AVFormatContext *inputCtx = nullptr, *outputCtx = nullptr;
AVCodecContext *decCtx = nullptr, *encCtx = nullptr;
AVDictionary *inputOpts = nullptr;
/* ==================== 1. 全局初始化 ==================== */
avformat_network_init(); // 初始化网络协议:ml-citation{ref="2,3" data="citationList"}
/* ==================== 2. 输入流配置 ==================== */
// 设置RTSP传输参数:ml-citation{ref="2,3" data="citationList"}
int timeout = 10 * 1000; // 10秒超时
av_dict_set(&inputOpts, "rtsp_transport", "tcp", 0);
av_dict_set(&inputOpts, "rw_timeout", "500000", 0); //设置超时参数(单位是微秒)
av_dict_set_int(&inputOpts, "stimeout", timeout, 0); //设置超时断开连接时间,单位微秒
av_dict_set(&inputOpts, "max_delay", "500000", 0); //设置最大时延
//av_dict_set(&inputOpts, "buffer_size", "1024000", 0); // 输入缓冲区
inputCtx = avformat_alloc_context();
inputCtx->flags |= AVFMT_FLAG_NOBUFFER;
// 打开输入流:ml-citation{ref="1,5" data="citationList"}
if ((ret = avformat_open_input(&inputCtx, INPUT_URL, nullptr, &inputOpts)) < 0) {
fprintf(stderr, "输入流打开失败: %s\n", AvErr2str(ret));
exit(0);
}
inputCtx->probesize = 1024 * 10 ^ 3;
inputCtx->max_analyze_duration = 5 * AV_TIME_BASE;
// 获取流信息:ml-citation{ref="1,7" data="citationList"}
if ((ret = avformat_find_stream_info(inputCtx, nullptr)) < 0) {
fprintf(stderr, "流信息获取失败: %s\n", AvErr2str(ret));
exit(0);
}
/* ==================== 3. 视频流初始化 ==================== */
// 查找视频流索引:ml-citation{ref="1,3" data="citationList"}
int videoIdx = av_find_best_stream(inputCtx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
if (videoIdx < 0) {
fprintf(stderr, "未找到视频流\n");
exit(0);
}
AVStream *inStream = inputCtx->streams[videoIdx];
// 初始化解码器:ml-citation{ref="1,7" data="citationList"}
const AVCodec *decoder = avcodec_find_decoder(inStream->codecpar->codec_id);
if (!decoder) {
fprintf(stderr, "解码器未找到\n");
exit(0);
}
decCtx = avcodec_alloc_context3(decoder);
if ((ret = avcodec_parameters_to_context(decCtx, inStream->codecpar)) < 0) {
fprintf(stderr, "解码参数复制失败: %s\n", AvErr2str(ret));
exit(0);
}
if ((ret = avcodec_open2(decCtx, decoder, nullptr)) < 0) {
fprintf(stderr, "解码器打开失败: %s\n", AvErr2str(ret));
exit(0);
}
/* ==================== 4. 输出流配置 ==================== */
const AVOutputFormat *outputFmt = av_guess_format("flv", nullptr, nullptr);
if (!outputFmt) {
fprintf(stderr, "Error guessing output format");
exit(0);
}
outputCtx = avformat_alloc_context();
// 创建输出上下文:ml-citation{ref="4,5" data="citationList"}
if ((ret = avformat_alloc_output_context2(&outputCtx, outputFmt, nullptr, OUTPUT_URL)) < 0) {
fprintf(stderr, "输出上下文创建失败: %s\n", AvErr2str(ret));
exit(0);
}
// 配置H264编码器参数:ml-citation{ref="4,5" data="citationList"}
const AVCodec *encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
encCtx = avcodec_alloc_context3(encoder);
encCtx->codec_id = AV_CODEC_ID_H264;
encCtx->width = decCtx->width; // 保持原始分辨率
encCtx->height = decCtx->height;
encCtx->pix_fmt = decCtx->pix_fmt; // 复用输入格式▲
encCtx->time_base = (AVRational) {1, 25};
encCtx->framerate = (AVRational) {25, 1};
encCtx->gop_size = decCtx->gop_size; // 关键帧间隔:ml-citation{ref="4" data="citationList"}
encCtx->max_b_frames = encCtx->max_b_frames; // B帧数量限制:ml-citation{ref="5" data="citationList"}
std::cout << "decCtx->bit_rate:" << decCtx->bit_rate << std::endl;
std::cout << "decCtx->gop_size:" << decCtx->gop_size << std::endl;
std::cout << "decCtx->time_base.num:" << decCtx->time_base.num << std::endl;
std::cout << "decCtx->time_base.den:" << decCtx->time_base.den << std::endl;
std::cout << "decCtx->framerate.num:" << decCtx->framerate.num << std::endl;
std::cout << "decCtx->framerate.den:" << decCtx->framerate.den << std::endl;
encCtx->bit_rate = (uint64_t) (decCtx->width * decCtx->height / 2.0);
encCtx->gop_size = 25;
encCtx->qcompress = 0.5;
//最大和最小量化系数
encCtx->qmin = 10;
encCtx->qmax = 51;
//设置0表示不使用B帧,b 帧越多,图片越小
encCtx->max_b_frames = 0;
AVDictionary *opts = nullptr;
//H.264
if (encCtx->codec_id == AV_CODEC_ID_H264) {
av_dict_set(&opts, "preset", "superfast", 0);
av_dict_set(&opts, "tune", "zerolatency", 0);
}
//av_opt_set(encCtx->priv_data, "preset", H264_PRESET, 0); // 编码预设:ml-citation{ref="4,5" data="citationList"}
if ((ret = avcodec_open2(encCtx, encoder, &opts)) < 0) {
fprintf(stderr, "编码器打开失败: %s\n", AvErr2str(ret));
exit(0);
}
// 创建输出流:ml-citation{ref="1,4" data="citationList"}
AVStream *outStream = avformat_new_stream(outputCtx, nullptr);
if ((ret = avcodec_parameters_copy(outStream->codecpar, inStream->codecpar)) < 0) {
fprintf(stderr, "编码参数复制失败: %s\n", AvErr2str(ret));
exit(0);
}
outStream->time_base = encCtx->time_base;
outStream->codecpar->codec_tag = 0;
// 打开输出IO:ml-citation{ref="4,5" data="citationList"}
if (!(outputCtx->oformat->flags & AVFMT_NOFILE)) {
// 设置RTMP传输为TCP
AVDictionary *opts = nullptr;
int timeout = 10 * 1000 * 1000; // 10秒超时
av_dict_set_int(&opts, "max_delay", 1000000, 0);
av_dict_set_int(&opts, "rw_timeout", timeout, 0); //设置超时参数(单位是微秒)
av_dict_set_int(&opts, "listen_time", timeout, 0); //设置超时断开连接时间,单位微秒
//av_dict_set(&opts, "timeout", "10000000", 0); //设置超时断开连接时间,单位微秒
ret = avio_open2(&outputCtx->pb, OUTPUT_URL, AVIO_FLAG_WRITE, nullptr, &opts);
if (opts != nullptr) { av_dict_free(&opts); }
if (ret < 0) {
fprintf(stderr, "输出IO打开失败: %s\n", AvErr2str(ret));
exit(0);
}
}
if ((ret = avformat_write_header(outputCtx, nullptr)) < 0) {
fprintf(stderr, "文件头写入失败: %s\n", AvErr2str(ret));
exit(0);
}
/* ==================== 5. 转码处理循环 ==================== */
AVPacket *pkt = av_packet_alloc();
AVPacket *pkt2 = av_packet_alloc();
AVFrame *frame = av_frame_alloc();
int64_t frameCount = 0;
int64_t lastPts = 0;
while (av_read_frame(inputCtx, pkt) >= 0) {
if (pkt->stream_index == videoIdx) {
// // 如果 PTS 是无效的,则根据上一个有效 PTS 和帧率来估算新的 PTS
// if (pkt->pts == AV_NOPTS_VALUE) {
// pkt->pts = lastPts + av_rescale_q(1, av_inv_q(encCtx->framerate), outStream->time_base);
// pkt->dts = pkt->pts;
// } else {
// lastPts = pkt->pts;
// }
//
// // 转换时间戳到RTMP输出流的时间基准
// //av_packet_rescale_ts(pkt, encCtx->time_base, outStream->time_base);
//
// if ((ret = av_interleaved_write_frame(outputCtx, pkt)) < 0) {
// fprintf(stderr, "数据写入失败: %d, %s\n", ret, AvErr2str(ret));
// }
if (1) {
// 解码步骤:ml-citation{ref="1,7" data="citationList"}
if ((ret = avcodec_send_packet(decCtx, pkt)) < 0) {
//fprintf(stderr, "解码错误: %s\n", AvErr2str(ret));
continue;
}
while (ret >= 0) {
ret = avcodec_receive_frame(decCtx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
if (ret < 0) {
fprintf(stderr, "解码帧接收错误: %s\n", AvErr2str(ret));
break;
}
// 编码步骤:ml-citation{ref="4,5" data="citationList"}
frame->pts = frameCount++; // 手动生成时间戳:ml-citation{ref="1,3" data="citationList"}
if ((ret = avcodec_send_frame(encCtx, frame)) < 0) {
fprintf(stderr, "编码错误: %s\n", AvErr2str(ret));
break;
}
while (ret >= 0) {
ret = avcodec_receive_packet(encCtx, pkt2);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
if (ret < 0) {
fprintf(stderr, "编码包接收错误: %s\n", AvErr2str(ret));
break;
}
// 时间戳转换:ml-citation{ref="3,4" data="citationList"}
av_packet_rescale_ts(pkt2, encCtx->time_base, outStream->time_base);
pkt->stream_index = outStream->index;
// 写入输出流:ml-citation{ref="1,5" data="citationList"}
if ((ret = av_interleaved_write_frame(outputCtx, pkt2)) < 0) {
fprintf(stderr, "数据写入失败: %d, %s\n", ret, AvErr2str(ret));
}
av_packet_unref(pkt2);
}
}
}
av_packet_unref(pkt);
// 添加一个短暂的休眠,避免CPU占用过高
std::this_thread::sleep_for(std::chrono::microseconds(10));
}
av_packet_unref(pkt);
}
/* ==================== 6. 资源回收 ==================== */
cleanup:
if (outputCtx) {
av_write_trailer(outputCtx);
if (!(outputCtx->oformat->flags & AVFMT_NOFILE)) avio_closep(&outputCtx->pb);
avformat_free_context(outputCtx);
}
if (inputCtx) avformat_close_input(&inputCtx);
if (decCtx) avcodec_free_context(&decCtx);
if (encCtx) avcodec_free_context(&encCtx);
av_dict_free(&inputOpts);
av_packet_free(&pkt);
av_frame_free(&frame);
return ret < 0 ? 1 : 0;
}