ffmpeg API

  1. 辅助函数

void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output);

ic the context to analyze

index index of the stream to dump information about(-1表示由ffmpeg自动选择)

url the URL to print, such as source or destination file

is_output Select whether the specified context is an input(0) or output(1)

Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.

#define av_err2str(errnum) \

av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)

打印错误信息。

  1. 注册

void av_register_all(void);

av_register_all() to register all compiled muxers, demuxers and protocols.

其调用了avcodec_register_all(),注册了所有的codec。(注:自FFmpeg 4.0起av_register_all()已废弃,新版本无需手动注册——请按所用版本确认)

void avdevice_register_all()

Initialize libavdevice and register all the input and output devices.

  1. 编码

/**

* Allocate an AVFormatContext for an output format.

* avformat_free_context() can be used to free the context and

* everything allocated by the framework within it.

*

* @param *ctx is set to the created format context, or to NULL in case of failure

* @param oformat format to use for allocating the context, if NULL

* format_name and filename are used instead

* @param format_name the name of output format to use for allocating the

* context, if NULL filename is used instead

* @param filename the name of the filename to use for allocating the context, may be NULL

* @return >= 0 in case of success, a negative AVERROR code in case of failure

*/

int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,

const char *format_name, const char *filename);

分配一个AVFormatContext结构体,并寻找合适的封装格式赋值给AVFormatContext的oformat字段。

int avio_open(AVIOContext **s, const char *url, int flags);

int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options);

int avio_close(AVIOContext *s);

int avio_closep(AVIOContext **s);

Create and initialize a AVIOContext for accessing the resource indicated by url.

AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);

将音频流或者视频流的信息填充好,分配出AVStream结构体,在音频流中分配声道、采样率、表示格式、编码器等信息,在视频中分配宽、高、帧率、表示格式、编码器等信息。

int avformat_write_header(AVFormatContext *s, AVDictionary **options);

int av_write_trailer(AVFormatContext *s);

avformat_write_header与av_write_trailer需成对调用,否则会crash。

// only allocates the AVPacket itself, not the data buffers. av_new_packet()

AVPacket *av_packet_alloc(void);

void av_packet_free(AVPacket **pkt);

void av_packet_unref(AVPacket *pkt);

// only allocates the AVFrame itself, not the data buffers. av_frame_get_buffer()

AVFrame *av_frame_alloc(void);

void av_frame_free(AVFrame **frame);

/**

* Allocate new buffer(s) for audio or video data.

*

* The following fields must be set on frame before calling this function:

* - format (pixel format for video, sample format for audio)

* - width and height for video

* - nb_samples and channel_layout for audio

*

* This function will fill AVFrame.data and AVFrame.buf arrays and, if

* necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.

* For planar formats, one buffer will be allocated for each plane.

*

* @warning: if frame already has been allocated, calling this function will leak memory.

* In addition, undefined behavior can occur in certain cases.

*

* @param frame frame in which to store the new buffers.

* @param align Required buffer size alignment. If equal to 0, alignment will be

* chosen automatically for the current CPU. It is highly

* recommended to pass 0 here unless you know what you are doing.

*

* @return 0 on success, a negative AVERROR on error.

*/

int av_frame_get_buffer(AVFrame *frame, int align);

 

void av_init_packet(AVPacket *pkt);

int av_new_packet(AVPacket *pkt, int size);

Takes input raw video data from frame and writes the next output packet, if available, to avpkt. The output packet does not necessarily contain data for the most recent frame, as encoders can delay and reorder input frames internally as needed.

 

Parameters

avctx codec context

avpkt output AVPacket. The user can supply an output buffer by setting avpkt->data and avpkt->size prior to calling the function, but if the size of the user-provided data is not large enough, encoding will fail. All other AVPacket fields will be reset by the encoder using av_init_packet(). If avpkt->data is NULL, the encoder will allocate it. The encoder will set avpkt->size to the size of the output packet. The returned data (if any) belongs to the caller, he is responsible for freeing it.

If this function fails or produces no output, avpkt will be freed using av_packet_unref().

 

Parameters

[in] frame AVFrame containing the raw video data to be encoded. May be NULL when flushing an encoder that has the AV_CODEC_CAP_DELAY capability set.

[out] got_packet_ptr This field is set to 1 by libavcodec if the output packet is non-empty, and to 0 if it is empty. If the function returns an error, the packet can be assumed to be invalid, and the value of got_packet_ptr is undefined and should not be used.

int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,

const AVFrame *frame, int *got_packet_ptr);

 

int av_write_frame(AVFormatContext *s, AVPacket *pkt);

int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);

 

  1. 解码

AVInputFormat *av_find_input_format(const char *short_name);

AVFormatContext *avformat_alloc_context(void);

void avformat_close_input(AVFormatContext **s);

/**

* Open an input stream and read the header. The codecs are not opened.

* The stream must be closed with avformat_close_input().

*

* @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).

* May be a pointer to NULL, in which case an AVFormatContext is allocated by this

* function and written into ps.

* Note that a user-supplied AVFormatContext will be freed on failure.

* @param url URL of the stream to open.

* @param fmt If non-NULL, this parameter forces a specific input format.

* Otherwise the format is autodetected.

* @param options A dictionary filled with AVFormatContext and demuxer-private options.

* On return this parameter will be destroyed and replaced with a dict containing

* options that were not found. May be NULL.

*

* @return 0 on success, a negative AVERROR on failure.

*

* @note If you want to use custom IO, preallocate the format context and set its pb field.

*/

int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options);

使用avformat_open_input来打开一个文件,第一个参数是一个AVFormatContext指针变量的地址,它会根据打开的文件信息填充AVFormatContext,需要注意的是,此处的AVFormatContext必须为NULL或由avformat_alloc_context分配得到。

avformat_open_input函数只是读文件头,并不会填充流信息,要调用avformat_find_stream_info获取文件中的流信息,此函数会读取packet,并确定文件中所有的流信息,设置pFormatCtx->streams指向文件中的流,但此函数并不会改变文件指针,读取的packet会交给后面的解码进行处理。(pFormatCtx->nb_streams是流的数量,一般为音频流和视频流各一个,即2)

/**

* Read packets of a media file to get stream information. This

* is useful for file formats with no headers such as MPEG. This

* function also computes the real framerate in case of MPEG-2 repeat

* frame mode.

* The logical file position is not changed by this function;

* examined packets may be buffered for later processing.

*

* @param ic media file handle

* @param options If non-NULL, an ic.nb_streams long array of pointers to

* dictionaries, where i-th member contains options for

* codec corresponding to i-th stream.

* On return each dictionary will be filled with options that were not found.

* @return >=0 if OK, AVERROR_xxx on error

*

* @note this function isn't guaranteed to open all the codecs, so

* options being non-empty at return is a perfectly normal behavior.

*

* @todo Let the user decide somehow what information is needed so that

* we do not waste time getting stuff the user does not need.

*/

int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);

int av_read_frame(AVFormatContext *s, AVPacket *pkt);

对于音频流,一个AVPacket可能会包含多个AVFrame,但是对于一个视频流,一个AVPacket只包含一个AVFrame,该函数最终只会返回一个AVPacket结构体。

 

  1. 编解码器

通过ID或name寻找编解码器。

AVCodec *avcodec_find_decoder(enum AVCodecID id);

AVCodec *avcodec_find_decoder_by_name(const char *name);

AVCodec *avcodec_find_encoder(enum AVCodecID id);

AVCodec *avcodec_find_encoder_by_name(const char *name);

分配AVCodecContext并初始化默认值。

/**

* Allocate an AVCodecContext and set its fields to default values. The

* resulting struct should be freed with avcodec_free_context().

*

* @param codec if non-NULL, allocate private data and initialize defaults

* for the given codec. It is illegal to then call avcodec_open2()

* with a different codec.

* If NULL, then the codec-specific defaults won't be initialized,

* which may result in suboptimal default settings (this is

* important mainly for encoders, e.g. libx264).

*

* @return An AVCodecContext filled with default values or NULL on failure.

*/

AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);

void avcodec_free_context(AVCodecContext **avctx);

int avcodec_parameters_from_context(AVCodecParameters *par,const AVCodecContext *codec);

int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par);

int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src);

打开关闭编解码器

int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);

int avcodec_close(AVCodecContext *avctx);

编解码

// 发送原始数据包给解码器

int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);

// 接收解码后的数据帧

int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);

// 发送原始数据帧到编码器

int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame);

// 接收编码后的数据包

int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);

  1. 格式转换

sws_scale可以对图像进行缩放和格式转换,可以使用不同算法对图像进行处理。

参考:ffmpeg中的sws_scale算法性能测试 雷霄骅

#define SWS_FAST_BILINEAR 1

#define SWS_BILINEAR 2

#define SWS_BICUBIC 4

#define SWS_X 8

#define SWS_POINT 0x10

#define SWS_AREA 0x20

#define SWS_BICUBLIN 0x40

#define SWS_GAUSS 0x80

#define SWS_SINC 0x100

#define SWS_LANCZOS 0x200

#define SWS_SPLINE 0x400

ffmpeg -s 480x272 -pix_fmt yuv420p -i src01_480x272.yuv -s 1280x720 -sws_flags bilinear -pix_fmt yuv420p src01_bilinear_1280x720.yuv

ffmpeg -s 480x272 -pix_fmt yuv420p -i src01_480x272.yuv -s 1280x720 -sws_flags bicubic -pix_fmt yuv420p src01_bicubic_1280x720.yuv

ffmpeg -s 480x272 -pix_fmt yuv420p -i src01_480x272.yuv -s 1280x720 -sws_flags neighbor -pix_fmt yuv420p src01_neighbor_1280x720.yuv

对图像缩放若要追求高效(比如视频图像处理),在不明确是放大还是缩小时,直接使用SWS_FAST_BILINEAR算法即可;如果明确是要缩小并显示,建议使用Point算法;如果明确是要放大并显示,使用CImage的Stretch反而更高效。

struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,

int dstW, int dstH, enum AVPixelFormat dstFormat,

int flags, SwsFilter *srcFilter,

SwsFilter *dstFilter, const double *param);

srcW the width of the source image

srcH the height of the source image

srcFormat the source image format

dstW the width of the destination image

dstH the height of the destination image

dstFormat the destination image format

flags specify which algorithm and options to use for rescaling

param extra parameters to tune the used scaler. For SWS_BICUBIC param[0] and [1] tune the shape of the basis function, param[0] tunes f(1) and param[1] f´(1). For SWS_GAUSS param[0] tunes the exponent and thus cutoff frequency. For SWS_LANCZOS param[0] tunes the width of the window function

void sws_freeContext(struct SwsContext *swsContext);

int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],

const int srcStride[], int srcSliceY, int srcSliceH,

uint8_t *const dst[], const int dstStride[]);

c the scaling context previously created with sws_getContext()

srcSlice the array containing the pointers to the planes of the source slice各颜色通道的buffer指针数组

srcStride the array containing the strides for each plane of the source image各颜色通道的每行字节数数组

srcSliceY the position in the source image of the slice to process, that is the number (counted starting from zero) in the image of the first row of the slice输入图像数据的第多少行开始逐行扫描,通常为0

srcSliceH the height of the source slice, that is the number of rows in the slice扫描多少行,通常为图像高度

dst the array containing the pointers to the planes of the destination image各颜色通道的buffer指针数组

dstStride the array containing the strides for each plane of the destination image各颜色通道的每行字节数数组

sws_ctx = sws_getContext( pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,

dst_w, dst_h, dst_pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);

  1. Audio

Audio重采样,采样格式转换和混流需要使用libswresample库。

音频转换使用SwrContext(通过swr_alloc()或swr_alloc_set_opts()分配),参数必须通过AVOptions设置。调用swr_init()初始化SwrContext后,音频转换通过重复调用swr_convert()完成。At the end of conversion the resampling buffer can be flushed by calling swr_convert() with NULL in and 0 in_count. 最后用swr_free()释放。

输入输出间延迟可通过swr_get_delay()获取。

SwrContext *swr = swr_alloc();

av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);

av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);

av_opt_set_int(swr, "in_sample_rate", 48000, 0);

av_opt_set_int(swr, "out_sample_rate", 44100, 0);

av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);

av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);

uint8_t **input;

int in_samples;

while (get_input(&input, &in_samples)) {

uint8_t *output;

int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +in_samples, 44100, 48000, AV_ROUND_UP);

av_samples_alloc(&output, NULL, 2, out_samples, AV_SAMPLE_FMT_S16, 0);

out_samples = swr_convert(swr, &output, out_samples, input, in_samples);

handle_output(output, out_samples);

av_freep(&output);

}

函数

struct SwrContext *swr_alloc(void);

int swr_init(struct SwrContext *s);

struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,

int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,

int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,

int log_offset, void *log_ctx);

void swr_free(struct SwrContext **s);

/** Convert audio.

*

* in and in_count can be set to 0 to flush the last few samples out at the end.

*

* If more input is provided than output space, then the input will be buffered.

* You can avoid this buffering by using swr_get_out_samples() to retrieve an

* upper bound on the required number of output samples for the given number of

* input samples. Conversion will run directly without copying whenever possible.

*

* @param s allocated Swr context, with parameters set

* @param out output buffers, only the first one need be set in case of packed audio

* @param out_count amount of space available for output in samples per channel

* @param in input buffers, only the first one need to be set in case of packed audio

* @param in_count number of input samples available in one channel

*

* @return number of samples output per channel, negative value on error

*/

int swr_convert(struct SwrContext *s, uint8_t **out, int out_count, const uint8_t **in , int in_count);

base timebase in which the returned delay will be:

if it's set to 1 the returned delay is in seconds

if it's set to 1000 the returned delay is in milliseconds

if it's set to the input sample rate then the returned delay is in input samples

if it's set to the output sample rate then the returned delay is in output samples

if it's the least common multiple of in_sample_rate and out_sample_rate then an exact rounding-free delay will be returned

Returns the delay in 1 / base units.

int64_t swr_get_delay(struct SwrContext *s, int64_t base);

 

#define AV_CH_FRONT_LEFT 0x00000001

#define AV_CH_FRONT_RIGHT 0x00000002

#define AV_CH_FRONT_CENTER 0x00000004

#define AV_CH_LOW_FREQUENCY 0x00000008

#define AV_CH_BACK_LEFT 0x00000010

#define AV_CH_BACK_RIGHT 0x00000020

#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040

#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080

#define AV_CH_BACK_CENTER 0x00000100

#define AV_CH_SIDE_LEFT 0x00000200

#define AV_CH_SIDE_RIGHT 0x00000400

#define AV_CH_TOP_CENTER 0x00000800

#define AV_CH_TOP_FRONT_LEFT 0x00001000

#define AV_CH_TOP_FRONT_CENTER 0x00002000

#define AV_CH_TOP_FRONT_RIGHT 0x00004000

#define AV_CH_TOP_BACK_LEFT 0x00008000

#define AV_CH_TOP_BACK_CENTER 0x00010000

#define AV_CH_TOP_BACK_RIGHT 0x00020000

#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.

#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.

#define AV_CH_WIDE_LEFT 0x0000000080000000ULL

#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL

#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL

#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL

#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL

 

/** Channel mask value used for AVCodecContext.request_channel_layout

to indicate that the user requests the channel order of the decoder output

to be the native codec channel order. */

#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL

 

#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)

#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)

#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)

#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)

 

enum AVSampleFormat {

AV_SAMPLE_FMT_NONE = -1,

AV_SAMPLE_FMT_U8, ///< unsigned 8 bits

AV_SAMPLE_FMT_S16, ///< signed 16 bits

AV_SAMPLE_FMT_S32, ///< signed 32 bits

AV_SAMPLE_FMT_FLT, ///< float

AV_SAMPLE_FMT_DBL, ///< double

 

AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar

AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar

AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar

AV_SAMPLE_FMT_FLTP, ///< float, planar

AV_SAMPLE_FMT_DBLP, ///< double, planar

AV_SAMPLE_FMT_S64, ///< signed 64 bits

AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar

 

AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically

};

/**

* Get the required buffer size for the given audio parameters.

*

* @param[out] linesize calculated linesize, may be NULL

* @param nb_channels the number of channels

* @param nb_samples the number of samples in a single channel

* @param sample_fmt the sample format

* @param align buffer size alignment (0 = default, 1 = no alignment)

* @return required buffer size, or negative error code on failure

*/

int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,

enum AVSampleFormat sample_fmt, int align);

/**

* Allocate a samples buffer for nb_samples samples, and fill data pointers and linesize accordingly.

* The allocated samples buffer can be freed by using av_freep(&audio_data[0])

* Allocated data will be initialized to silence.

*

* @see enum AVSampleFormat

* The documentation for AVSampleFormat describes the data layout.

*

* @param[out] audio_data array to be filled with the pointer for each channel

* @param[out] linesize aligned size for audio buffer(s), may be NULL

* @param nb_channels number of audio channels

* @param nb_samples number of samples per channel

* @param align buffer size alignment (0 = default, 1 = no alignment)

* @return >=0 on success or a negative error code on failure

* @todo return the size of the allocated buffer in case of success at the next bump

* @see av_samples_fill_arrays()

* @see av_samples_alloc_array_and_samples()

*/

int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,

int nb_samples, enum AVSampleFormat sample_fmt, int align);

// This is the same as av_samples_alloc(), but also allocates the data pointers array.

int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,

int nb_samples, enum AVSampleFormat sample_fmt, int align);

 

音频编码

AVCodecContext int frame_size;//Samples per packet

对于ffmpeg音频的codec,好像每次只能编这个数量的采样

AVFrame int nb_samples; //number of audio samples (per channel) described by this frame

对于ffmpeg音频的frame,表示frame中采样的数量

一般设置AVFrame.nb_samples = AVCodecContext.frame_size;//估计是这样的,但是没有验证

pAudioFrame = avcodec_alloc_frame(); /* 注:avcodec_alloc_frame()已废弃,新版FFmpeg请使用av_frame_alloc() */

pAudioFrame->nb_samples= pAudioEncodeCtx->frame_size;

pAudioFrame->format= pAudioEncodeCtx->sample_fmt;

 

//依据channel、nb_samples、sample_fmt 计算frame的数据块的大小

int size = av_samples_get_buffer_size(NULL, pAudioEncodeCtx->channels, pAudioEncodeCtx->frame_size, pAudioEncodeCtx->sample_fmt, 1);

uint8_t * frame_buf = (uint8_t *)av_malloc(size);

//依据channel、nb_samples、sample_fmt及frame的数据块大小,设置frame中的信息

avcodec_fill_audio_frame(pAudioFrame, pAudioEncodeCtx->channels, pAudioEncodeCtx->sample_fmt,(const uint8_t*)frame_buf, size, 1);

 

while (1)
    {

 int readSize = fread(frame_buf, 1, size, fInputPCM);

if (readSize <= 0) {

           break;

 }

       pAudioFrame->data[0] = frame_buf;  //采样信号

       int got_frame = 0;

 

       int ret = avcodec_encode_audio2(pAudioEncodeCtx, &AudioPacket, pAudioFrame, &got_frame);

 

  1. Timestamp

pts:19960 pts_time:19.96 dts:19960 dts_time:19.96 duration:0 duration_time:0 stream_index:0

pts:19854 pts_time:19.854 dts:19854 dts_time:19.854 duration:26 duration_time:0.026 stream_index:1

#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))

#define AV_TIME_BASE 1000000

#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}

/**

* Rational number (pair of numerator and denominator). 有理数,用Q表示有理数集

*/

typedef struct AVRational{

int num; ///< Numerator

int den; ///< Denominator // 分母

} AVRational;

static inline AVRational av_make_q(int num, int den)

{

AVRational r = { num, den };

return r;

}

enum AVRounding {

AV_ROUND_ZERO = 0, ///< Round toward zero. 趋近于0

AV_ROUND_INF = 1, ///< Round away from zero. 趋远于0

AV_ROUND_DOWN = 2, ///< Round toward -infinity. 趋于更小的整数

AV_ROUND_UP = 3, ///< Round toward +infinity. 趋于更大的整数

AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero

// 四舍五入,小于0.5取值趋向0,大于0.5取值趋远于0

/**

* Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through

* unchanged, avoiding special cases for #AV_NOPTS_VALUE.

*

* Unlike other values of the enumeration AVRounding, this value is a

* bitmask that must be used in conjunction with another value of the

* enumeration through a bitwise OR, in order to set behavior for normal

* cases.

*

* @code{.c}

* av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);

* // Rescaling 3:

* // Calculating 3 * 1 / 2

* // 3 / 2 is rounded up to 2

* // => 2

*

* av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX);

* // Rescaling AV_NOPTS_VALUE:

* // AV_NOPTS_VALUE == INT64_MIN

* // AV_NOPTS_VALUE is passed through

* // => AV_NOPTS_VALUE

* @endcode

*/

AV_ROUND_PASS_MINMAX = 8192,

};

int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;

int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) av_const;

/**

* Rescale a 64-bit integer by 2 rational numbers.

*

* The operation is mathematically equivalent to `a * bq / cq`.

*

* This function is equivalent to av_rescale_q_rnd() with #AV_ROUND_NEAR_INF.

*

* @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd()

*/

int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;

/**

* Rescale a 64-bit integer by 2 rational numbers with specified rounding.

*

* The operation is mathematically equivalent to `a * bq / cq`.

*

* @see av_rescale(), av_rescale_rnd(), av_rescale_q()

*/

int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd) av_const;

/**

* @return One of the following values:

* - 0 if `a == b`

* - 1 if `a > b`

* - -1 if `a < b`

* - `INT_MIN` if one of the values is of the form `0 / 0`

*/

static inline int av_cmp_q(AVRational a, AVRational b){

const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;

 

if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;

else if(b.den && a.den) return 0;

else if(a.num && b.num) return (a.num>>31) - (b.num>>31);

else return INT_MIN;

}

// Convert an AVRational to a `double`.

static inline double av_q2d(AVRational a){

return a.num / (double) a.den;

}

int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);

// convert valid timing fields (timestamps / durations) in a packet from one timebase to another.

// Timestamps with unknown values (AV_NOPTS_VALUE) will be ignored.

void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);

将ts转化为string:

static inline char *av_ts_make_string(char *buf, int64_t ts)

{

if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");

else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%" PRId64, ts);

return buf;

}

static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb)

{

if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");

else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts);

return buf;

}

#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts)

#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb)

编译

gcc avmux.c -lavformat -lavcodec -lavdevice -lavutil -lswscale -lswresample -lm

$CC avmux.c -o vcapture -I/home/wang/repoc/ffmpeg_cross/ffmpeg_arm/include -L/home/wang/repoc/ffmpeg_cross/ffmpeg_arm/lib -lavformat -lavcodec -lavdevice -lavutil -lswscale -lswresample -lavfilter -lpostproc -lm 

gcc 1.c -g -o fcamera   `pkg-config "libavcodec" --cflags --libs` `pkg-config "libavformat" --cflags --libs` `pkg-config "libavutil" --cflags --libs` `pkg-config "libswresample" --cflags --libs`  `pkg-config "libswscale" --cflags --libs` `pkg-config "libavdevice" --cflags --libs`

 

参考:

1. 关于视频编解码工具--ffmpeg

2. ffmpeg-libav-tutorial ffmpeg应用教程

3. http://slhck.info/ffmpeg-encoding-course/

4. 学习FFmpeg API – 解码视频

5. C语言的面向对象设计 —— 对 X264/FFMPEG 架构探讨

 

posted @ 2016-12-20 23:26  yuxi_o  阅读(1188)  评论(0编辑  收藏  举报