首先介绍 H.264 解码的主要操作流程,实现代码如下(API_H264Decode.cpp):
#include "API_H264Decode.hpp"
//----------------------------ffmpeg  h264解码类
// FFmpeg H.264 decoder wrapper.
// Initializes every member so the destructor and ProcessDecode() can
// safely test the pointers even if InitializeDev() was never called.
// (Members listed in declaration order to avoid -Wreorder warnings.)
API::H264DecodeDev::H264DecodeDev():
m_pcodec(nullptr),
m_pCodeCtx(nullptr),
m_pFormat(nullptr),   // BUGFIX: was left uninitialized
m_pFrame(nullptr),//m_pCodecParserCtx(nullptr),
m_pCallBack(nullptr), // BUGFIX: was left uninitialized; ProcessDecode() dereference-tests it
m_nWidth(0),
m_nHeight(0),
m_nFrameNumber(0)
{
}
// Releases all FFmpeg resources acquired in InitializeDev().
API::H264DecodeDev::~H264DecodeDev(){
  if (m_pFrame != nullptr)
  {
    av_frame_free(&m_pFrame);   // also nulls m_pFrame
  }
  if (m_pCodeCtx != nullptr)
  {
    // BUGFIX: the old code called av_free(m_pcodec).  The AVCodec returned
    // by avcodec_find_decoder() is a static descriptor owned by libavcodec
    // and must NOT be freed -- freeing it corrupts the heap.  What we own
    // is the codec *context*, so close and free that instead.
    avcodec_close(m_pCodeCtx);
    avcodec_free_context(&m_pCodeCtx);  // also nulls m_pCodeCtx
  }
  m_pcodec = nullptr; // non-owning pointer into libavcodec's codec table
}
// Creates and opens an H.264 decoder.
// pCallBack     : invoked once per decoded frame with an RGB24 buffer.
// width/height  : dimensions of the RGB24 output images.
// Returns true on success; on failure every partially created FFmpeg
// object is released so the destructor cannot double-free anything.
bool API::H264DecodeDev::InitializeDev(LPGetFramePtr  pCallBack,int width,int height){
    if (nullptr == pCallBack){
        LLERROR("H264  CallBack_Ptr is nullptr");
        return false;
    }
    if (0 == width || 0 == height){
        LLERROR("H264  width or height is zero");
        return false;
    }
    m_pCallBack = pCallBack;
    m_nWidth    = width;
    m_nHeight   = height;

    m_pcodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (nullptr == m_pcodec)
    {
        LLERROR("avcodec_find_decoder failed");
        return false;
    }
    // NOTE: the old code did `m_pcodec->capabilities |= CODEC_CAP_DELAY`.
    // That mutated libavcodec's shared static codec descriptor (affecting
    // every decoder in the process) and set a flag that is *reported by*
    // the codec, not requested by callers -- it has been removed.
    m_pCodeCtx = avcodec_alloc_context3(m_pcodec);
    if (nullptr == m_pCodeCtx)
    {
        LLERROR("avcodec_alloc_context3 failed");
        return false;
    }
    if (avcodec_open2(m_pCodeCtx, m_pcodec, nullptr) < 0)
    {
        LLERROR("avcodec_open2  failed ");
        avcodec_free_context(&m_pCodeCtx);  // BUGFIX: context was leaked here
        return false;
    }
    m_pFrame = av_frame_alloc();
    if (nullptr == m_pFrame)
    {
        LLERROR("av_frame_alloc failed");   // BUGFIX: allocation was unchecked
        avcodec_close(m_pCodeCtx);
        avcodec_free_context(&m_pCodeCtx);
        return false;
    }
    // data/size are filled per call in ProcessDecode().
    av_init_packet(&m_Packet);
    return true;
}

// RTSP stream-data callback: feeds one encoded H.264 buffer to the decoder
// and, for every frame produced, converts it to RGB24 (R/B swapped) and
// hands it to the registered callback.
// pBuffer/nSize : one encoded access unit.
// fStreamId     : stream identifier forwarded verbatim to the callback.
// Returns false only on an allocation/conversion exception.
bool API::H264DecodeDev::ProcessDecode(uint8_t* pBuffer,int nSize,const char* fStreamId)
{
    m_Packet.data = pBuffer;
    m_Packet.size = nSize;

    int ret = avcodec_send_packet(m_pCodeCtx, &m_Packet);
    // BUGFIX: the packet was only unreferenced when the send succeeded,
    // leaking any packet side data on every failed send.
    av_packet_unref(&m_Packet);
    if (ret != 0)
    {
        return true;    // nothing decodable in this buffer -- not fatal
    }
    // One packet may yield zero or more frames: drain them all
    // (the old code fetched at most one frame per packet).
    while (avcodec_receive_frame(m_pCodeCtx, m_pFrame) == 0)
    {
        ++m_nFrameNumber;
        int nDataSize = FrameChangeRGB24(m_pFrame, nullptr);  // size-query mode
        if (nDataSize <= 0)
        {
            continue;
        }
        try
        {
            // BUGFIX: the old code only delete[]d the buffer when the
            // callback was set, leaking one image per frame otherwise;
            // delete[] now runs on every non-throwing path.  (The old
            // `if (pImage != nullptr)` check was dead code: new[] throws
            // std::bad_alloc instead of returning null.)
            unsigned char* pImage = new unsigned char[nDataSize];
            memset(pImage, 0, nDataSize);
            FrameChangeRGB24(m_pFrame, &pImage);          // YUV420P -> RGB24
            RedChangeBlue(pImage, m_nWidth, m_nHeight);   // RGB -> BGR order
            if (m_pCallBack != nullptr)
            {
                m_pCallBack(pImage, m_nFrameNumber, fStreamId);
            }
            delete[] pImage;
        }
        catch (const std::bad_alloc& e)
        {
            LLERROR("new malloc exception:%s", e.what());
            return false;
        }
        catch (const std::exception& e)
        {
            LLERROR("other  exception: %s", e.what());
            return false;
        }
    }
    return true;
}

//RGB24红蓝调换
// Swap the R and B channels of a packed RGB24 buffer in place
// (RGB <-> BGR conversion); pRGB holds nWidth*nHeight*3 bytes.
void API::H264DecodeDev::RedChangeBlue(uint8_t* pRGB, int nWidth, int nHeight)
{
  const int nPixels = nWidth * nHeight;
  for (int i = 0; i < nPixels; ++i)
  {
    uint8_t* pPixel = pRGB + i * 3;   // 3 bytes per pixel
    const uint8_t uRed = pPixel[0];
    pPixel[0] = pPixel[2];
    pPixel[2] = uRed;
  }
}
// Convert a decoded YUV420P frame to a packed m_nWidth x m_nHeight RGB24
// buffer.
// pFrame : decoded frame (source dimensions taken from it).
// pRGB   : address of the destination buffer pointer; when nullptr the
//          function only reports the required buffer size in bytes.
// Returns the byte size in size-query mode, otherwise 0.
int API::H264DecodeDev::FrameChangeRGB24(AVFrame* pFrame, uint8_t** pRGB)
{
    const int nBytes = m_nWidth * m_nHeight * 3;   // packed RGB24: 3 bytes/pixel
    if (nullptr == pRGB)
    {
        return nBytes;   // size-query mode
    }
    // Destination is a single packed plane; the remaining strides are unused.
    int nDstStride[4] = { m_nWidth * 3, 0, 0, 0 };
    struct SwsContext* pSws = sws_getContext(pFrame->width, pFrame->height, AV_PIX_FMT_YUV420P,
                                             m_nWidth, m_nHeight, AV_PIX_FMT_RGB24,
                                             SWS_POINT, nullptr, nullptr, nullptr);
    if (nullptr != pSws)
    {
        sws_scale(pSws, pFrame->data, pFrame->linesize, 0, pFrame->height, pRGB, nDstStride);
        sws_freeContext(pSws);
    }
    return 0;
}

然后是对应的头文件 API_H264Decode.hpp:

#ifndef			_API_H264_DECODE_HPP_
#define			_API_H264_DECODE_HPP_
#include 		"API_Define.hpp"
#include 		<exception>
typedef void(*LPGetFramePtr)(u_int8_t* pImage ,int& frame,const char* CameraIp);
extern "C"
{
	#include <libavutil/opt.h>
	#include <libavcodec/avcodec.h>
	#include <libavutil/channel_layout.h>
	#include <libavutil/common.h>
	#include <libavutil/imgutils.h>
	#include <libavutil/mathematics.h>
	#include <libavutil/samplefmt.h>
	#include <libswscale/swscale.h>
	#include <libavformat/avformat.h> 
};
// API namespace
namespace API
{
	// FFmpeg-based H.264 software decoder.
	// Usage: call InitializeDev() once, then ProcessDecode() for every
	// received H.264 buffer; each decoded frame is converted to RGB24
	// and delivered through the LPGetFramePtr callback.
	class H264DecodeDev{
	public:
		H264DecodeDev();
		virtual ~H264DecodeDev();
	private:
		AVCodec 			        *m_pcodec;   // decoder descriptor (owned by libavcodec, non-owning here)
		AVCodecContext              *m_pCodeCtx; // open decoder context (owned)
		AVFormatContext				*m_pFormat;  // NOTE(review): never used in the .cpp -- confirm before removing
		//AVCodecParserContext		*m_pCodecParserCtx;
		AVFrame 					*m_pFrame;   // reusable decoded-frame buffer (owned)
		AVPacket        			m_Packet;    // reusable input packet
	private:
		LPGetFramePtr  				m_pCallBack; // per-frame RGB24 delivery callback
	private:
		int 						m_nWidth;    // RGB24 output width  in pixels
		int 						m_nHeight;	 // RGB24 output height in pixels
	public://ffmpeg 
		virtual bool 				InitializeDev(LPGetFramePtr  pCallBack,int width,int height);// create+open decoder; false on failure
		int 						FrameChangeRGB24(AVFrame* pFrame, uint8_t** pRGB); // YUV420P -> RGB24 (size query when pRGB==nullptr)
		void 						RedChangeBlue(uint8_t* pRGB, int nWidth, int nHeight); // swap R/B channels in place
		virtual bool 				ProcessDecode(uint8_t* pBuffer,int nSize,const char* fStreamId); // decode one buffer, fire callback per frame
	public:
		int  						m_nFrameNumber; // running count of decoded frames
	};
};
#endif

最后一个文件是定义宏和公共类型的文件:API_Define.hpp

#ifndef			_API_DEFINE_HPP_H
#define			_API_DEFINE_HPP_H
// Macro definitions
// BUGFIX: `#define nullptr NULL` redefines a C++11 keyword, which is
// ill-formed on modern compilers and silently changes pointer semantics;
// keep the fallback for pre-C++11 compilers only.
#if !defined(__cplusplus) || __cplusplus < 201103L
#define		nullptr			NULL
#endif
#define         CALLBACK
#define		CHAR_SIZE		128
#define		NEURALNETWORK_NULL	0
#define		NEURALNETWORK_BEGIN	1
#define		NEURALNETWORK_END	2

// Decoder identifiers
#define		DECODE_ONE		1
#define		DECODE_TWO		2

// Socket ports
#define		NEURALNETWORK_PORT	9796
#define		API_MAIN_SERVER_PORT	9995
#define 	RTSP_DATA_PORT 9797
// Camera types
#define		CAMERA_TYPE_LINKAGE	0	// linkage
#define		CAMERA_TYPE_GUARD	4	// guard
#define		CAMERA_TYPE_MANUAL	1	// manual

#define     AVI_ACCEPT_SERVER_PORT 9527
// Timestamp buffer size
#define		TIME_SIZE		64


// Image dimensions

//#define		IMAGE_WIDTH		960
//#define		IMAGE_HEIGHT		540
//#define		IMAGE_SIZE		(960 * 540 * 3)

#define		IMAGE_WIDTH		640
#define		IMAGE_HEIGHT		480
// BUGFIX: parenthesized so expressions such as `x / IMAGE_SIZE` or
// `-IMAGE_SIZE` expand correctly instead of `x / 640 * 480 * 3`.
#define		IMAGE_SIZE		(640 * 480 * 3)

// Logging macros (colored terminal output; __DEBUG__ adds file/line info)
//#define		__DEBUG__
#ifdef		__DEBUG__
//#define		LLDEBUG(format, ...)		printf("NORMAL:%s(%d): " format"\n", __FILE__, __LINE__, ##__VA_ARGS__)
//#define		LLERROR(format, ...)		printf("ERROR:%s(%d): " format"\n", __FILE__, __LINE__, ##__VA_ARGS__)
#define		LLDEBUG(format, ...)		printf("\033[;32mDEBUG\033[0m:%s(%d): " format"\n", __FILE__, __LINE__, ##__VA_ARGS__)
#define		LLERROR(format, ...)		printf("\033[;31mERROR\033[0m:%s(%d): " format"\n", __FILE__, __LINE__, ##__VA_ARGS__)
#else
#define		LLDEBUG(format, ...)		printf("\033[;32mPRINT\033[0m: " format"\n", ##__VA_ARGS__)
#define		LLERROR(format, ...)		printf("\033[;31mERROR\033[0m: " format"\n", ##__VA_ARGS__)
#endif

/*
Compiler built-in macros (ANSI C standard predefined macros):
__LINE__: expands to the current source line number;
__FILE__: expands to the current source file name;
__DATE__: expands to the compilation date;
__TIME__: expands to the compilation time;
__STDC__: defined as 1 when strict ANSI C conformance is requested;
__cplusplus: defined when compiling C++ code.
DEBUG(format,...) printf("FILE: "__FILE__", LINE: %d: "format"/n", __LINE__, ##__VA_ARGS__)
__FUNCDNAME__ and __FUNCSIG__ expand to a function's decorated name and signature.
*/

// Windows-style type aliases (for code ported from Win32).
typedef unsigned char		uint_8;
typedef unsigned char		BYTE;
typedef unsigned short		WORD;
typedef unsigned int		DWORD;
typedef unsigned int            COLORKEY;
typedef unsigned int            COLORREF;
typedef unsigned int   		UINT;
// BUGFIX: duplicate `typedef unsigned short WORD;` removed (declared above).
typedef unsigned short  	USHORT;
typedef int            		LONG;	// NOTE(review): Win32 LONG is 32-bit; `int` matches that on LP64 Linux
typedef int 			BOOL;
typedef unsigned int*  		LPDWORD;
typedef void* 			LPVOID;
typedef void* 			HANDLE;
typedef long long  Int64;
//头文件
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <iostream>
#include <queue>
#include <string.h>

// Camera connection parameters
typedef struct _SP_NEURALNETWORK_CAMERA
{
	char		strCamera[CHAR_SIZE];	// camera IP
	char		strPort[CHAR_SIZE];	// camera port
	char		strUser[CHAR_SIZE];	// camera account
	char		strPasswd[CHAR_SIZE];	// camera password
}SP_NEURALNETWORK_CAMERA, *PSP_NEURALNETWORK_CAMERA;
// Image record produced by the neural-network pipeline
typedef struct _SP_NEURALNETWORK_IMAGE
{
	char		strCamera[CHAR_SIZE];	// camera IP
	int		nNumber;		// sequence number
	int		nFrameSpeed;		// frame rate
	int		nLength;		// image length in bytes
	double		dbRisk;			// current detector probability
	unsigned char*	pImage;			// image data
}SP_NEURALNETWORK_IMAGE, *PSP_NEURALNETWORK_IMAGE;
// Raw RTSP image
typedef struct _SP_RTSP_IMAGE
{
	char			RtspCamera[CHAR_SIZE];	// camera IP
	int				nLength;		// image length in bytes
	unsigned char*	pImage;			// image data
}SP_RTSP_IMAGE,*PSP_RTSP_IMAGE;
// Neural-network bounding-box batch header
typedef struct _SP_NEURALNETWORK_HEAD
{
	char		strCamera[CHAR_SIZE];	// camera IP (e.g. 192.168.200.60)
	int		nIdentify;		// identifier (e.g. NEURALNETWORK_NULL)
	int		nFrameSpeed;		// frame rate
	int		nFrameSize;		// number of rectangles that follow
}SP_NEURALNETWORK_HEAD, *PSP_NEURALNETWORK_HEAD;

// Neural-network bounding-box rectangle
typedef struct _SP_NEURALNETWORK_RECT
{
        int             nType;			// class id
        int             nRisk;			// probability
        int             nMinX;			// top-left X
        int             nMinY;			// top-left Y
        int             nMaxX;			// bottom-right X
        int             nMaxY;			// bottom-right Y
}SP_NEURALNETWORK_RECT, *PSP_NEURALNETWORK_RECT;

// Guard/linkage camera analysis state
typedef struct _SP_CAMERA_FRAME_STATE
{
	char		strCamera[CHAR_SIZE];	// camera IP
	int		nState;			// analysis state
	int		nRisk;			// current detector probability
}SP_CAMERA_FRAME_STATE, *PSP_CAMERA_FRAME_STATE;

// Virtual-fence header
typedef struct _SP_VIRTUAL_FENCE_HEAD
{
	char		strCamera[CHAR_SIZE];	// camera IP
	int		nNumber;		// sequence number
	int		nMaxFA;			// total number of virtual fences
	int		nPoint;			// number of fence vertices
}SP_VIRTUAL_FENCE_HEAD, *PSP_VIRTUAL_FENCE_HEAD;

// Virtual-fence vertex
typedef struct _SP_VIRTUAL_FENCE_INFO
{
	int		nPosX;			// X coordinate
	int		nPosY;			// Y coordinate
}SP_VIRTUAL_FENCE_INFO, *PSP_VIRTUAL_FENCE_INFO;


// RTSP frame payload
// BUGFIX: PSP_RTSPDATA was declared without '*', making it a second alias
// for the struct itself; every other P-prefixed alias in this file is a
// pointer type, so it is declared as a pointer here.
typedef struct _SP_RTSPDATA{
	unsigned char* pImage;	// frame data
	int nSize;		// frame size in bytes
	long long nFrameNum;	// frame sequence number
}SP_RTSPDATA,*PSP_RTSPDATA;

// Accepted client socket plus its peer address
typedef struct _SP_SOCKET_DATA
{
	int 		sockClient;
	struct 		sockaddr_in	 addrClient;
}SP_SOCKET_DATA,*PSP_SOCKET_DATA;

// Callback definitions
typedef void(*LPSOCKETCALLBACKEX)(void* pSocket, int nSocket, void* zParam);		// socket callback

#endif//_API_DEFINE_HPP_H

Logo

更多推荐