FFmpeg 视频处理：提取视频参数，以及剪辑、拼接、合并视频与抽帧等
视频封面图（缩略图）获取
#ifndef _BUFFER_CONTAINER_H_
#define _BUFFER_CONTAINER_H_
#include <cstring>  // memcpy (BUGFIX: was <Memory>, wrong case and wrong header)
#include <memory>

// Owning, deep-copying fixed-length buffer of T.
// NOTE: elements are copied with memcpy, so T must be trivially copyable.
template <typename T>
class BufferContainer
{
public:
    // Empty container: no allocation.
    BufferContainer()
    {
        m_buffer = nullptr;
        m_length = 0;
    }

    // Deep-copies `length` elements starting at the address of `t1`.
    // The caller guarantees &t1 points at an array of at least `length` elements.
    BufferContainer(const T& t1, int length)
    {
        // BUGFIX: the original also required `t1` itself to be truthy, which
        // rejected valid buffers whose first element happens to be zero.
        if (length > 0)
        {
            m_buffer = new T[length];
            m_length = length;
            memcpy(m_buffer, &t1, length * sizeof(T));
        }
    }

    // Replaces the contents with a deep copy of pData[0..length).
    // A null pointer or non-positive length leaves the container empty.
    void setData(T* pData, int length)
    {
        // BUGFIX: the original leaked the old buffer when pData was null and
        // then allocated and memcpy'd from the null pointer (UB).
        // Release first, then copy only from valid input.
        delete[] m_buffer;  // delete[] nullptr is a no-op
        m_buffer = nullptr;
        m_length = 0;
        if (pData && length > 0)
        {
            m_buffer = new T[length];
            m_length = length;
            memcpy(m_buffer, pData, length * sizeof(T));
        }
    }

    // Deep copy; an empty source yields an empty container
    // (members default to nullptr/0 via the in-class initializers below).
    BufferContainer(const BufferContainer& other)
    {
        if (other.m_buffer && other.m_length > 0)
        {
            m_buffer = new T[other.m_length];
            m_length = other.m_length;
            memcpy(m_buffer, other.m_buffer, other.m_length * sizeof(T));
        }
    }

    ~BufferContainer()
    {
        delete[] m_buffer;
        m_buffer = nullptr;
        m_length = 0;
    }

    // Deep-copy assignment.
    // BUGFIX: guards against self-assignment, which previously freed the
    // buffer and then copied from the freed memory.
    BufferContainer& operator=(const BufferContainer& other)
    {
        if (this != &other)
        {
            delete[] m_buffer;
            m_buffer = nullptr;
            m_length = 0;
            if (other.m_buffer && other.m_length > 0)
            {
                m_buffer = new T[other.m_length];
                m_length = other.m_length;
                memcpy(m_buffer, other.m_buffer, sizeof(T) * other.m_length);
            }
        }
        return *this;
    }

    // Mutable pointer to the stored elements (nullptr when empty).
    T* data()
    {
        return m_buffer;
    }

    // Number of stored elements.
    int length()
    {
        return m_length;
    }

private:
    T* m_buffer = nullptr;
    int m_length = 0;
};
#endif
// Decodes the first key frame of the video at filePath and returns it as a
// tightly packed RGB24 pixel buffer (width * height * 3 bytes).
// Returns an empty container on any failure (file unreadable, no video
// stream, no decoder, decode/convert error).
BufferContainer<unsigned char> VideoEditerBase::thumbnail(const std::string& filePath)
{
    av_register_all();
    BufferContainer<unsigned char> result;
    AVFormatContext* fmtContext = nullptr;
    if (avformat_open_input(&fmtContext, filePath.c_str(), nullptr, nullptr) < 0) {
        return result;
    }
    if (avformat_find_stream_info(fmtContext, nullptr) < 0) {
        avformat_close_input(&fmtContext);
        return result;
    }
    // Locate the first video stream.
    int nStreamIndex = -1;
    AVCodecParameters* codecParameters = nullptr;
    for (unsigned int i = 0; i < fmtContext->nb_streams; i++) {
        if (fmtContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            nStreamIndex = i;
            codecParameters = fmtContext->streams[i]->codecpar;
            break;
        }
    }
    if (nStreamIndex == -1) {
        avformat_close_input(&fmtContext);
        return result;
    }
    AVCodec* codec = avcodec_find_decoder(codecParameters->codec_id);
    if (!codec) {
        avformat_close_input(&fmtContext);
        return result;
    }
    AVCodecContext* codecContext = avcodec_alloc_context3(codec);
    if (!codecContext) {
        avformat_close_input(&fmtContext);
        return result;
    }
    if (avcodec_parameters_to_context(codecContext, codecParameters) < 0 ||
        avcodec_open2(codecContext, codec, nullptr) < 0) {
        avcodec_free_context(&codecContext);
        avformat_close_input(&fmtContext);
        return result;
    }
    AVPacket packet;
    av_init_packet(&packet);
    packet.data = nullptr;
    packet.size = 0;
    AVFrame* frame = av_frame_alloc();
    bool captured = false;
    // BUGFIX: the original mixed deprecated avcodec_decode_video2 with the
    // send/receive API (decoding every packet twice), leaked packets from
    // non-video streams (continue without av_packet_unref), and broke after
    // the first video packet regardless of whether a key frame was found.
    // Decode with send/receive only, unref every packet, and keep reading
    // until a key frame has been converted.
    while (!captured && av_read_frame(fmtContext, &packet) >= 0) {
        if (packet.stream_index != nStreamIndex) {
            av_packet_unref(&packet);
            continue;
        }
        int ret = avcodec_send_packet(codecContext, &packet);
        av_packet_unref(&packet);
        if (ret < 0) {
            break;
        }
        while (!captured && avcodec_receive_frame(codecContext, frame) >= 0) {
            if (!frame->key_frame) {
                continue;
            }
            SwsContext* swsContext = sws_getContext(frame->width, frame->height, codecContext->pix_fmt,
                frame->width, frame->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, nullptr, nullptr, nullptr);
            if (!swsContext) {
                break;
            }
            AVFrame* rgbFrame = av_frame_alloc();
            if (rgbFrame) {
                rgbFrame->format = AV_PIX_FMT_RGB24;
                rgbFrame->width = frame->width;
                rgbFrame->height = frame->height;
                int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_RGB24, frame->width, frame->height, 1);
                uint8_t* buffer = new uint8_t[bufferSize];
                av_image_fill_arrays(rgbFrame->data, rgbFrame->linesize, buffer,
                                     AV_PIX_FMT_RGB24, frame->width, frame->height, 1);
                sws_scale(swsContext, frame->data, frame->linesize, 0, frame->height,
                          rgbFrame->data, rgbFrame->linesize);
                // Repack row by row so the result has no linesize padding.
                int outputBufferSize = rgbFrame->width * rgbFrame->height * 3;
                unsigned char* outputBuffer = new unsigned char[outputBufferSize];
                for (int i = 0; i < rgbFrame->height; i++) {
                    memcpy(outputBuffer + i * rgbFrame->width * 3,
                           rgbFrame->data[0] + i * rgbFrame->linesize[0],
                           rgbFrame->width * 3);
                }
                result.setData(outputBuffer, outputBufferSize);  // deep copy
                delete[] outputBuffer;
                delete[] buffer;
                av_frame_free(&rgbFrame);
                captured = true;
            }
            sws_freeContext(swsContext);
        }
    }
    av_frame_free(&frame);
    avcodec_free_context(&codecContext);
    avformat_close_input(&fmtContext);
    return result;
}
视频剪辑与拼接的实现（VideoEditerMp4）
# include "videoeditermp4.h"
# include <QDebug>
extern "C" {
# include "libavformat/avformat.h"
# include "libavcodec/avcodec.h"
# include "libavutil/imgutils.h"
# include "libavutil/opt.h"
# include "libswresample/swresample.h"
# include "libswscale/swscale.h"
}
// Default constructor: worker state is set up by the member initializers
// declared in videoeditermp4.h (not visible in this file).
VideoEditerMp4::VideoEditerMp4()
{
}
// Stops any running clip/merge worker before the object goes away.
// NOTE(review): the worker thread is detached, so stop() presumably only
// signals it; confirm the detached thread cannot touch this object after
// destruction completes.
VideoEditerMp4::~VideoEditerMp4() {
    stop();
}
// Kicks off an asynchronous clip job: the section [st, et] (seconds) of
// `input` is remuxed into `output` by runClip() on a detached worker thread.
// Progress and completion are reported through stateCallBack.
void VideoEditerMp4::startClip(std::string input, std::string output, int64_t st, int64_t et)
{
    // Replace whatever inputs a previous job left behind with this one file.
    m_vecInputPaths.assign(1, input);
    m_outputPath = output;
    m_startTime = st;
    m_endTime = et;
    // NOTE(review): any previous m_pThread object is overwritten without being
    // deleted — looks like a small leak; confirm ownership in the header.
    m_pThread = new std::thread(&VideoEditerMp4::runClip, this);
    m_pThread->detach();
}
// Kicks off an asynchronous merge job: every file in `inputs` is concatenated
// into `output` by runMerge() on a detached worker thread. Progress and
// completion are reported through stateCallBack.
void VideoEditerMp4::startMerge(std::vector<std::string> inputs, std::string output)
{
    // Drop any leftover inputs, then take the new list wholesale.
    m_vecInputPaths.clear();
    m_vecInputPaths = inputs;
    m_outputPath = output;
    // NOTE(review): the previous m_pThread object is never deleted — possible
    // leak; confirm ownership in the header.
    m_pThread = new std::thread(&VideoEditerMp4::runMerge, this);
    m_pThread->detach();
}
void VideoEditerMp4 :: runClip ( )
{
stateCallBack ( RUNNING) ;
if ( m_vecInputPaths. empty ( ) || m_outputPath. empty ( ) ) {
stateCallBack ( FAIL) ;
return ;
}
AVFormatContext* fmtContext = avformat_alloc_context ( ) ;
if ( avformat_open_input ( & fmtContext, m_vecInputPaths. front ( ) . c_str ( ) , nullptr , nullptr ) < 0 ) {
stateCallBack ( FAIL) ;
return ;
}
if ( avformat_find_stream_info ( fmtContext, nullptr ) < 0 ) {
avformat_close_input ( & fmtContext) ;
stateCallBack ( FAIL) ;
return ;
}
int videoStreamIndex = - 1 ;
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
if ( fmtContext-> streams[ i] -> codecpar-> codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
break ;
}
}
int audioStreamIndex = - 1 ;
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
if ( fmtContext-> streams[ i] -> codecpar-> codec_type == AVMEDIA_TYPE_AUDIO) {
audioStreamIndex = i;
break ;
}
}
AVFormatContext* output_format_ctx = nullptr ;
if ( avformat_alloc_output_context2 ( & output_format_ctx, nullptr , nullptr , m_outputPath. c_str ( ) ) )
{
avformat_close_input ( & fmtContext) ;
stateCallBack ( FAIL) ;
return ;
}
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
AVStream* stream = avformat_new_stream ( output_format_ctx, nullptr ) ;
if ( ! stream) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
if ( avcodec_copy_context ( stream-> codec, fmtContext-> streams[ i] -> codec) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
if ( avcodec_parameters_copy ( stream-> codecpar, fmtContext-> streams[ i] -> codecpar) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
}
if ( ! ( output_format_ctx-> oformat-> flags & AVFMT_NOFILE) ) {
if ( avio_open ( & output_format_ctx-> pb, m_outputPath. c_str ( ) , AVIO_FLAG_WRITE) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
}
if ( avformat_write_header ( output_format_ctx, nullptr ) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
int64_t videoIndex = 0 ;
int64_t audioIndex = 0 ;
AVRational timeBase= fmtContext-> streams[ videoStreamIndex] -> time_base;
AVPacket packet;
while ( true )
{
if ( m_bStop) {
break ;
}
if ( av_read_frame ( fmtContext, & packet) < 0 ) {
break ;
}
if ( packet. stream_index == audioStreamIndex) {
if ( packet. pts* av_q2d ( timeBase) >= m_startTime && packet. pts* av_q2d ( timeBase) <= m_endTime) {
if ( audioIndex % m_interval == 0 ) {
if ( audioIndex== 0 ) {
packet. pts = av_rescale_q ( 0 , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. dts = av_rescale_q ( 0 , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
} else {
packet. pts = av_rescale_q ( packet. pts- m_startTime/ av_q2d ( timeBase) , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. dts = av_rescale_q ( packet. dts - m_startTime/ av_q2d ( timeBase) , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
}
packet. duration = av_rescale_q ( packet. duration, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
av_write_frame ( output_format_ctx, & packet) ;
audioIndex++ ;
continue ;
av_packet_unref ( & packet) ;
}
audioIndex++ ;
}
}
if ( packet. stream_index == videoStreamIndex) {
if ( packet. pts* av_q2d ( timeBase) >= m_startTime && packet. pts* av_q2d ( timeBase) <= m_endTime) {
qDebug ( ) << "============>sec:" << packet. pts* av_q2d ( timeBase) << "<==============" ;
if ( videoIndex % m_interval == 0 ) {
packet. pts = av_rescale_q ( packet. pts- m_startTime/ av_q2d ( timeBase) , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. dts = av_rescale_q ( packet. dts - m_startTime/ av_q2d ( timeBase) , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. duration = av_rescale_q ( packet. duration, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
av_write_frame ( output_format_ctx, & packet) ;
videoIndex++ ;
av_packet_unref ( & packet) ;
continue ;
}
videoIndex++ ;
}
}
av_packet_unref ( & packet) ;
{
std:: unique_lock< std:: mutex> lock ( m_mutex) ;
cv. wait ( lock, [ this ] {
if ( m_bPaused) {
stateCallBack ( PAUSE) ;
}
return ! m_bPaused; } ) ;
}
}
av_write_trailer ( output_format_ctx) ;
avformat_close_input ( & fmtContext) ;
avio_close ( output_format_ctx-> pb) ;
avformat_free_context ( output_format_ctx) ;
if ( m_bStop) {
stateCallBack ( STOP) ;
return ;
}
stateCallBack ( FINISH) ;
}
void VideoEditerMp4 :: runMerge ( )
{
stateCallBack ( RUNNING) ;
if ( m_vecInputPaths. empty ( ) || m_outputPath. empty ( ) ) {
stateCallBack ( FAIL) ;
return ;
}
AVFormatContext* fmtContext = avformat_alloc_context ( ) ;
if ( avformat_open_input ( & fmtContext, m_vecInputPaths. front ( ) . c_str ( ) , nullptr , nullptr ) < 0 ) {
stateCallBack ( FAIL) ;
return ;
}
if ( avformat_find_stream_info ( fmtContext, nullptr ) < 0 ) {
avformat_close_input ( & fmtContext) ;
stateCallBack ( FAIL) ;
return ;
}
int videoStreamIndex = - 1 ;
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
if ( fmtContext-> streams[ i] -> codecpar-> codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
break ;
}
}
int audioStreamIndex = - 1 ;
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
if ( fmtContext-> streams[ i] -> codecpar-> codec_type == AVMEDIA_TYPE_AUDIO) {
audioStreamIndex = i;
break ;
}
}
AVFormatContext* output_format_ctx = nullptr ;
if ( avformat_alloc_output_context2 ( & output_format_ctx, nullptr , nullptr , m_outputPath. c_str ( ) ) )
{
avformat_close_input ( & fmtContext) ;
stateCallBack ( FAIL) ;
return ;
}
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
AVStream* stream = avformat_new_stream ( output_format_ctx, nullptr ) ;
if ( ! stream) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
if ( avcodec_copy_context ( stream-> codec, fmtContext-> streams[ i] -> codec) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
if ( avcodec_parameters_copy ( stream-> codecpar, fmtContext-> streams[ i] -> codecpar) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
}
if ( ! ( output_format_ctx-> oformat-> flags & AVFMT_NOFILE) ) {
if ( avio_open ( & output_format_ctx-> pb, m_outputPath. c_str ( ) , AVIO_FLAG_WRITE) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
}
if ( avformat_write_header ( output_format_ctx, nullptr ) < 0 ) {
avformat_close_input ( & fmtContext) ;
avformat_close_input ( & output_format_ctx) ;
stateCallBack ( FAIL) ;
return ;
}
int64_t videoIndex = 0 ;
int64_t audioIndex = 0 ;
AVRational timeBase= fmtContext-> streams[ videoStreamIndex] -> time_base;
int64_t currentPts= 0 ;
AVPacket packet;
for ( int k= 0 ; k< m_vecInputPaths. size ( ) ; k++ ) {
if ( m_bStop) {
break ;
}
avformat_close_input ( & fmtContext) ;
if ( avformat_open_input ( & fmtContext, m_vecInputPaths. at ( k) . c_str ( ) , nullptr , nullptr ) < 0 ) {
stateCallBack ( FAIL) ;
return ;
}
if ( avformat_find_stream_info ( fmtContext, nullptr ) < 0 ) {
avformat_close_input ( & fmtContext) ;
stateCallBack ( FAIL) ;
return ;
}
int videoStreamIndex = - 1 ;
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
if ( fmtContext-> streams[ i] -> codecpar-> codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
break ;
}
}
int audioStreamIndex = - 1 ;
for ( int i = 0 ; i < fmtContext-> nb_streams; i++ ) {
if ( fmtContext-> streams[ i] -> codecpar-> codec_type == AVMEDIA_TYPE_AUDIO) {
audioStreamIndex = i;
break ;
}
}
if ( k!= 0 ) {
currentPts+= ( fmtContext-> streams[ videoStreamIndex] -> duration/ 1000 ) / av_q2d ( timeBase) ;
}
while ( true )
{
if ( m_bStop) {
break ;
}
if ( av_read_frame ( fmtContext, & packet) < 0 ) {
break ;
}
if ( packet. stream_index == audioStreamIndex) {
if ( audioIndex % m_interval == 0 ) {
if ( audioIndex== 0 ) {
packet. pts = av_rescale_q ( 0 , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. dts = av_rescale_q ( 0 , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
} else {
packet. pts = av_rescale_q ( packet. pts+ currentPts, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. dts = av_rescale_q ( packet. dts+ currentPts, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
}
packet. duration = av_rescale_q ( packet. duration, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
av_write_frame ( output_format_ctx, & packet) ;
audioIndex++ ;
continue ;
av_packet_unref ( & packet) ;
}
audioIndex++ ;
}
if ( packet. stream_index == videoStreamIndex) {
qDebug ( ) << "============>sec:" << packet. pts* av_q2d ( timeBase) << "<==============" ;
if ( videoIndex % m_interval == 0 ) {
if ( videoIndex== 0 ) {
packet. pts = av_rescale_q ( 0 , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. dts = av_rescale_q ( 0 , timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
} else {
packet. pts = av_rescale_q ( packet. pts+ currentPts, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
packet. dts = av_rescale_q ( packet. dts+ currentPts, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
}
packet. duration = av_rescale_q ( packet. duration, timeBase, output_format_ctx-> streams[ videoStreamIndex] -> time_base) ;
av_write_frame ( output_format_ctx, & packet) ;
videoIndex++ ;
av_packet_unref ( & packet) ;
continue ;
}
videoIndex++ ;
}
av_packet_unref ( & packet) ;
{
std:: unique_lock< std:: mutex> lock ( m_mutex) ;
cv. wait ( lock, [ this ] {
if ( m_bPaused) {
stateCallBack ( PAUSE) ;
}
return ! m_bPaused; } ) ;
}
}
}
av_write_trailer ( output_format_ctx) ;
avformat_close_input ( & fmtContext) ;
avio_close ( output_format_ctx-> pb) ;
avformat_free_context ( output_format_ctx) ;
if ( m_bStop) {
stateCallBack ( STOP) ;
return ;
}
stateCallBack ( FINISH) ;
}