Printing audio/video timestamps from an RTSP stream with FFmpeg

A demo program that uses the FFmpeg API to receive the audio and video streams of an RTSP source and print the timestamp of every packet, in order to verify A/V synchronization. Target environment: Linux.
//FFDecoder.h
#pragma once
//#define __STDC_CONSTANT_MACROS
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/avutil.h"
#include "libavutil/opt.h"
#include "libswscale/swscale.h"
}
// BUGFIX: #pragma comment(lib, ...) is an MSVC extension; the stated target
// platform is Linux, where g++/clang only emit unknown-pragma warnings for it.
// Guard it so the header is clean on both toolchains (on Linux the libraries
// are supplied on the link line instead).
#ifdef _MSC_VER
#pragma comment(lib,"avutil.lib")
#pragma comment(lib,"avformat.lib")
#pragma comment(lib,"swresample.lib")
#pragma comment(lib,"avcodec.lib")
#pragma comment(lib,"swscale.lib")
#endif

// Minimal FFmpeg demuxer/decoder wrapper used to open an RTSP URL (or file),
// decode video frames, and print each packet's pts/dts so A/V synchronization
// can be inspected from the console output.
class CFFDecoder
{
public:
	CFFDecoder();
	virtual ~CFFDecoder();
	// Opens the URL/file and the video (and, if present, audio) decoders.
	// Returns 0 on success, a negative error code otherwise.
	int OpenFile(const char *pFilePath);
	// Reports the video frame dimensions; returns 0 on success,
	// -1 if no video decoder has been opened yet.
	int GetMediaInfo(int &nFrameW,int &nFrameH);
	// Reads one packet, prints its timestamps and decodes it into pFrame.
	// Returns >0 when a decoded video frame is available, 0 otherwise.
	int GetOneFrame(AVFrame *pFrame);
private:
	AVFormatContext *m_pFormatCxt;  // demuxer context (owned)
	AVCodecContext *m_pCodecCtx;    // video codec context (owned by m_pFormatCxt)
	AVCodec *m_pCodec;              // video decoder (static registry entry, not owned)
	AVCodecContext *m_pACodecCtx;   // audio codec context (owned by m_pFormatCxt)
	AVCodec *m_pACodec;             // audio decoder (not owned)
	AVPacket m_Packet;              // scratch packet reused by GetOneFrame()
	int m_nVideoIndex;              // video stream index, -1 if absent
	int m_nAudioIndex;              // audio stream index, -1 if absent
	double m_nLastAudioPTS;         // last printed audio packet pts
	double m_nLastVideoPTS;         // last printed video packet pts
	double m_nStartAudioTS;         // audio stream start_time
	double m_nStartVideoTS;         // video stream start_time
};
//FFDecoder.cpp
#include "FFDecoder.h"
// Allocates the demuxer context and resets all state; the actual input and
// decoders are opened later in OpenFile().
CFFDecoder::CFFDecoder()
{
	m_pFormatCxt = avformat_alloc_context();
	m_pCodecCtx = NULL;
	m_pCodec = NULL;
	// BUGFIX: m_pACodecCtx/m_pACodec were previously left uninitialized,
	// so any code inspecting them before OpenFile() completed (or after it
	// failed) would read garbage pointers.
	m_pACodecCtx = NULL;
	m_pACodec = NULL;
	m_nVideoIndex = -1;
	m_nAudioIndex = -1;
	m_nLastAudioPTS = -1;
	m_nLastVideoPTS = -1;
	m_nStartAudioTS = -1;
	m_nStartVideoTS = -1;
}
// Releases the demuxer.
// BUGFIX: the original empty destructor leaked the AVFormatContext allocated
// in the constructor. avformat_close_input() closes the input if it was
// opened and frees the context together with its per-stream codec contexts.
CFFDecoder::~CFFDecoder()
{
	if (m_pFormatCxt)
		avformat_close_input(&m_pFormatCxt);
}
// Opens the RTSP URL (or file), locates the video and audio streams and
// opens their decoders. Audio is optional; video is required.
// Returns 0 on success:
//  -1 null path / open failed, -2 no stream info,
//  -3 no video stream, -4 decoder not found, -5 decoder open failed.
int CFFDecoder::OpenFile(const char *pFilePath)
{
	// Guard against a null path (the original would pass it straight to FFmpeg).
	if (pFilePath == NULL)
	{
		return -1;
	}
	av_register_all();
	if (avformat_open_input(&m_pFormatCxt, pFilePath, NULL, NULL) < 0)
	{
		return -1;
	}
	if (avformat_find_stream_info(m_pFormatCxt, NULL) < 0)
	{
		return -2;
	}
	// Locate the video and audio stream indices.
	// BUGFIX: nb_streams is unsigned — use an unsigned loop index.
	for (unsigned int i = 0; i < m_pFormatCxt->nb_streams; i++)
	{
		if (m_pFormatCxt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			m_nVideoIndex = (int)i;
		}
		else if (m_pFormatCxt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			m_nAudioIndex = (int)i;
		}
	}
	if (m_nVideoIndex == -1)
	{
		return -3;
	}
	// Open the video decoder.
	m_pCodecCtx = m_pFormatCxt->streams[m_nVideoIndex]->codec;
	m_pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id);
	if (m_pCodec == NULL) {
		return -4;
	}
	if (avcodec_open2(m_pCodecCtx, m_pCodec, NULL) < 0) {
		return -5;
	}
	m_nStartVideoTS = m_pFormatCxt->streams[m_nVideoIndex]->start_time;
	// Open the audio decoder.
	// BUGFIX: the original indexed streams[m_nAudioIndex] unconditionally,
	// which crashes (index -1) on video-only inputs. Audio is now optional.
	if (m_nAudioIndex != -1)
	{
		m_pACodecCtx = m_pFormatCxt->streams[m_nAudioIndex]->codec;
		m_pACodec = avcodec_find_decoder(m_pACodecCtx->codec_id);
		if (m_pACodec == NULL) {
			return -4;
		}
		if (avcodec_open2(m_pACodecCtx, m_pACodec, NULL) < 0) {
			return -5;
		}
		m_nStartAudioTS = m_pFormatCxt->streams[m_nAudioIndex]->start_time;
	}
	return 0;
}
int CFFDecoder::GetMediaInfo(int &nFrameW,int &nFrameH)
{
if(m_pCodecCtx==NULL)
return -1;
nFrameW = m_pCodecCtx->width;
nFrameH = m_pCodecCtx->height;
return 0;
}
// Reads one packet from the input, prints its pts/dts in milliseconds,
// decodes it, and prints the current video-minus-audio timestamp drift plus
// the wall-clock time.
// Returns >0 when a decoded video frame was produced in pFrame, 0 otherwise.
int CFFDecoder::GetOneFrame(AVFrame *pFrame)
{
	if (m_pFormatCxt == NULL)
		return 0;
	int nGotPicture = -1;
	if (av_read_frame(m_pFormatCxt, &m_Packet) >= 0)
	{
		if (m_Packet.stream_index == m_nVideoIndex)
		{
			// Convert timestamps to milliseconds using the stream's time base.
			// BUGFIX: the original divided by a hard-coded 90, i.e. it assumed
			// a 90 kHz time base. av_q2d(time_base)*1000 gives the same value
			// for 90 kHz streams and is correct for every other time base.
			double dTbMs = av_q2d(m_pFormatCxt->streams[m_nVideoIndex]->time_base) * 1000.0;
			double pts = (m_Packet.pts == AV_NOPTS_VALUE) ? -1 : m_Packet.pts * dTbMs;
			double dts = (m_Packet.dts == AV_NOPTS_VALUE) ? -1 : m_Packet.dts * dTbMs;
			double nDur = pts - m_nLastVideoPTS;
			printf("video pts = %.02f dts=%.02f --- dur=%.02f\n",
				pts,
				dts,
				nDur);
			m_nLastVideoPTS = pts;
			int nLen = avcodec_decode_video2(m_pCodecCtx, pFrame, &nGotPicture, &m_Packet);
			if (nLen < 0)
			{
				// BUGFIX: the packet was leaked on this early return.
				av_free_packet(&m_Packet);
				return 0;
			}
			if (nGotPicture)
			{
				// Got one decoded video frame.
				// BUGFIX: the packet was leaked on this early return too.
				av_free_packet(&m_Packet);
				return nLen;
			}
		}
		else if (m_nAudioIndex == m_Packet.stream_index)
		{
			// Same conversion for audio.
			// BUGFIX: the original hard-coded /8 (assumed an 8 kHz time base).
			double dTbMs = av_q2d(m_pFormatCxt->streams[m_nAudioIndex]->time_base) * 1000.0;
			double pts = (m_Packet.pts == AV_NOPTS_VALUE) ? -1 : m_Packet.pts * dTbMs;
			double dts = (m_Packet.dts == AV_NOPTS_VALUE) ? -1 : m_Packet.dts * dTbMs;
			double nDur = pts - m_nLastAudioPTS;
			printf("audio pts = %.02f dts=%.02f --- dur=%.02f\n",
				pts,
				dts,
				nDur);
			m_nLastAudioPTS = pts;
			// Decode purely to keep the decoder state advancing; the audio
			// samples themselves are not consumed by this demo.
			if (m_pACodecCtx)
				avcodec_decode_audio4(m_pACodecCtx, pFrame, &nGotPicture, &m_Packet);
		}
		// Print the current A/V drift and the wall-clock time.
		printf("v.ts(%.02f) - a.ts(%.02f) = %.02f\n", m_nLastVideoPTS, m_nLastAudioPTS, m_nLastVideoPTS - m_nLastAudioPTS);
		time_t timer;
		struct tm *t_tm;
		time(&timer);
		t_tm = localtime(&timer);
		// BUGFIX: tm_mon is 0-based, so the month must be printed as tm_mon+1
		// (the original printed January as month 0).
		printf("now time is %4d-%02d-%02d %02d:%02d:%02d\n",
			t_tm->tm_year + 1900,
			t_tm->tm_mon + 1,
			t_tm->tm_mday,
			t_tm->tm_hour,
			t_tm->tm_min,
			t_tm->tm_sec);
		// BUGFIX: only free the packet when av_read_frame() succeeded; the
		// original also freed it on the failure path, where m_Packet may
		// never have been initialized.
		av_free_packet(&m_Packet);
	}
	return 0;
}
Main program (FFmpegTest.cpp):
// FFmpegTest.cpp : 定义控制台应用程序的入口点。
//
#include "FFDecoder.h"
int main(int argc, char **argv)
{
if(argc!=2)
{
printf("err arg. exe rtsp://...\n");
return 0;
}
char szRTSPUrl[256]={0};
strcpy(szRTSPUrl,argv[1]);
printf("rtsp url = %s\n",szRTSPUrl);
CFFDecoder dec;
dec.OpenFile(szRTSPUrl);
AVFrame *pFrame=av_frame_alloc();
while(1)
{
if(dec.GetOneFrame(pFrame)>0)
{
//printf("pts=%lld\n",pFrame->pkt_dts);
}
}
system("pause");
return 0;
}
Build script (note: -lavutil and -lswresample added — avformat/avcodec depend on them and the original link line omitted them):
g++ -o ffts *.cpp -I/usr/local/include/ -I./ -D__STDC_CONSTANT_MACROS -L/usr/local/lib/ -lavcodec -lavdevice -lavformat -lavfilter -lswscale -lswresample -lavutil
更多推荐
已为社区贡献3条内容
所有评论(0)