音視頻流入門——FFMpeg

[圖片上傳失敗...(image-434ae3-1598596817566)]

歡迎star的項目

測試視頻:

一、Tutorial1——視頻截幀保存

https://github.com/wlxklyh/FFMpegStudy/blob/master/win/Tutorial1
這個是9361幀的截圖

image

流程說明:

  1. 初始化
    av_register_all()
    AVFormatContext pFormatCtx = avformat_alloc_context()
  2. 打開文件(會讀取header)
    avformat_open_input(pFormatCtx,filepath)
  3. 檢查和獲取流信息的API
    avformat_find_stream_info()
    av_dump_format()
  4. 從pFormatCtx中得到那個(gè)streamsindex是視頻流
    pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
  5. 從視頻流里面得到CodeContext 編解碼上下文
  6. 從而得到解碼器
  7. 讀取幀
    av_read_frame 得到avpacket(存儲的可能是視頻流 也可能是音頻流)
  8. 判斷avpacket是不是視頻流
  9. 從讀取出的avpacket中讀取AVFrame出來
  10. 用sws_scale轉(zhuǎn)AVFrame的格式 大多可能是YUV 我們要轉(zhuǎn)成RGB保存
  11. SaveFrame
    fwrite(frame->data[0]+pFrame->linesize[0],1,width*3,pFile)
#include "stdafx.h"
#include <fstream>

// Save an RGB24 AVFrame to disk as a binary PPM ("P6") image.
// pFrame - frame whose data[0] holds packed RGB24 pixels, one row every linesize[0] bytes
// width  - image width in pixels
// height - image height in pixels
// iFrame - frame index, used to build the file name "frameNNNN.ppm"
void SaveFrame(AVFrame* pFrame, int width, int height, int iFrame)
{
    //(1) Build the output file name.
    char szFileName[32];
    sprintf(szFileName, "frame%04d.ppm", iFrame);

    //(2) Open the file in binary mode ("wb" matters on Windows).
    FILE *pFile = fopen(szFileName, "wb");
    if (pFile == NULL)
    {
        return;
    }

    //(3) Write the PPM header: magic number, dimensions, max color value.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    //(4) Write the pixel rows. linesize[0] may include alignment padding,
    //    so copy exactly width*3 bytes (RGB24) from the start of each row.
    for (int i = 0; i < height; i++)
    {
        fwrite(pFrame->data[0] + i * pFrame->linesize[0], 1, width * 3, pFile);
    }

    // BUG FIX: the original never closed the file, leaking one FILE* handle
    // per saved frame and risking loss of buffered data.
    fclose(pFile);
}


int _tmain(int argc, _TCHAR* argv[])
{
    //文件名字 
    char filepath[] = "nwn.mp4";

    //(1)這里注冊了所有的文件格式和編解碼器的庫 所以他們將被自動的使用在被打開的合適格式的文件  只需要注冊一次
    av_register_all();
    avformat_network_init();
    AVFormatContext *pFormatCtx;
    pFormatCtx = avformat_alloc_context();

    //(2)打開一個(gè)文件 打開之后pFormatCtx就有有了文件句柄  這個(gè)會打開文件且讀取Header
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) 
    {
        return -1;
    }

    //(3)檢查在文件中的流的信息
    if(avformat_find_stream_info(pFormatCtx,0)<0)
    {
        return -1;
    }

    //(4)dump下信息
    av_dump_format(pFormatCtx,0, filepath,0);


    AVCodecContext *pCodecCtx;
    int videoStream = -1;
    //pFormatCtx->Streams 僅僅是一組pFormatCtx->nb_streams 的指針 包含了哪些流
    for(int i=0; i<pFormatCtx->nb_streams;i++)
    {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    }

    if(videoStream == -1)
    {
        return -1; 
    }

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    AVCodec *pCodec;


    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    if(pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }

    if(avcodec_open(pCodecCtx,pCodec) < 0)
    {
        return -1;
    }

    AVFrame *pFrame;
    pFrame = avcodec_alloc_frame();

    AVFrame *pFrameRGB = avcodec_alloc_frame();
    if(pFrameRGB == NULL)
    {
        return -1;
    }
    uint8_t *buffer;
    int numBytes;
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

    buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));

    //數(shù)據(jù)是從buffer來的  這里相當(dāng)于是pFrameRGB初始化數(shù)據(jù) 可能是個(gè)黑色的圖片
    avpicture_fill((AVPicture*)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);


    int frameFinished;
    AVPacket packet;
    int i = 0;
    //(5)循環(huán)從Steams中 讀取出frame packet通常包含一個(gè)壓縮的Frame 音頻則可能是多個(gè)Frame
    while(av_read_frame(pFormatCtx,&packet)>=0)
    {
        //stream_index Packet所在stream的index 通過這個(gè)來判斷是不是視頻幀
        if(packet.stream_index == videoStream)
        {
            //把視頻幀解壓到Frame中
            avcodec_decode_video2(pCodecCtx,pFrame,&frameFinished,&packet);
            if(frameFinished)
            {
                //舊版本
                //img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,(AVPicture*)pFrame, pCodecCtx->pix_fmt,pCodecCtx->width, pCodecCtx->height);
                //轉(zhuǎn)換參數(shù) 
                SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
                //轉(zhuǎn)換
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
                ++i;
                if( i%120 == 1)
                {
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
                }
            }
            av_free_packet(&packet);
        }
    }
    av_free(buffer);
    av_free(pFrame);
    av_free(pFrameRGB);

    avcodec_close(pCodecCtx);

    avformat_close_input(&pFormatCtx);


    return 0;
}

二、Tutorial2——視頻顯示到屏幕(SDL)

https://github.com/wlxklyh/FFMpegStudy/blob/master/win/Tutorial2
[圖片上傳失敗...(image-39968a-1598596817566)]
跟Tutorial1大致一致 除了SDL部分 和 Frame編碼為YUV

  1. SDL部分
SDL_Init//SDL初始化
SDL_Surface* screen = SDL_SetVideoMode//初始化一個(gè)屏幕
SDL_Overlay* bmp = SDL_CreateYUVOverlay//有點(diǎn)像Screen上面的一個(gè)View
//YUVOverlay  賦值
SDL_LockYUVOverlay 
bmp->pixels[0] = nowYUVAVFrame->data[0];
...
bmp->pitches[0] = nowYUVAVFrame->linesize[0];
...
//顯示
SDL_DisplayYUVOverlay
  1. Frame編碼為YUV
    注意Frame的初始化 格式是PIX_FMT_YUV420P
    avpicture_get_size(PIX_FMT_YUV420P, avcodecContext->width, avcodecContext->height);
    sws_scale 轉(zhuǎn)換的時(shí)候格式是PIX_FMT_YUV420P

流程說明:

  1. 初始化編碼器庫
  2. 打開視頻文件 avformat_open_input
  3. 查找視頻流 pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
  4. 得到解碼器上下文 pFormatCtx->streams[vedioIndex]->codec
  5. 得到解碼器 avcodec_find_decoder(avVideoCodecCtx->codec_id)
  6. 打開解碼器 avcodec_open2
  7. 幀初始化 AVFrame avpicture_fill
  8. SDL初始化 SDL_Surface SDL_Overlay
  9. 初始化一個(gè)packet av_new_packet
  10. 逐幀讀取 av_read_frame
    1. 如果是視頻流avcodec_decode_video 解碼
    2. 轉(zhuǎn)換上下文 swscontext
    3. sws_scale轉(zhuǎn)換成NowYUVFrame
    4. 賦值SDL_Overlay
    5. 顯示SDL_DisplayYUVOverlay
#include "stdafx.h"
#include <fstream>

int _tmain(int argc, _TCHAR* argv[])
{
    char filepath[] = "nwn.mp4";

    //(1)這里注冊了所有的文件格式和編解碼器的庫 所以他們將被自動的使用在被打開的合適格式的文件  只需要注冊一次
    av_register_all();
    avformat_network_init();
    AVFormatContext  *pFormatContext = avformat_alloc_context();

    //(2)打開文件
    if(avformat_open_input(&pFormatContext,filepath, NULL, NULL)!=0)
    {
        //打開失敗
        return -1;
    }

    //(3)打印信息
    if (av_find_stream_info(pFormatContext) < 0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    av_dump_format(pFormatContext, 0, filepath, 0);


    //(4)查找視頻流
    int vedioIndex = -1;
    for(int i=0;i<pFormatContext->nb_streams;i++)
    {
        if(pFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            vedioIndex = i;
            break;
        }
    }
    if(vedioIndex == -1)
    {
        return -1;
    }

    //(5)得到視頻流的解碼器
    AVCodec * avcodec;
    AVCodecContext * avcodecContext;
    avcodecContext = pFormatContext->streams[vedioIndex]->codec;
    avcodec = avcodec_find_decoder(avcodecContext->codec_id);
    if (avcodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1;
    }
    if (avcodec_open(avcodecContext, avcodec) < 0)
    {
        return -1;
    }

    //(6)幀初始化
    AVFrame *nowAVFrame = avcodec_alloc_frame();
    AVFrame *nowYUVAVFrame = avcodec_alloc_frame();
    if (nowAVFrame == NULL || nowYUVAVFrame == NULL)
    {
        return -1;
    }
    uint8_t *buffer;
    int numBytes;
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, avcodecContext->width, avcodecContext->height);
    buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
    //數(shù)據(jù)是從buffer來的  這里相當(dāng)于是pFrameRGB初始化數(shù)據(jù) 可能是個(gè)黑色的圖片
    avpicture_fill((AVPicture*)nowYUVAVFrame, buffer, PIX_FMT_YUV420P, avcodecContext->width, avcodecContext->height);

    //-------SDL
    if (SDL_Init((SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)))
    {
        return -1;
    }
    SDL_Rect rect;
    SDL_Surface* screen = SDL_SetVideoMode(avcodecContext->width,avcodecContext->height,0,0);
    SDL_Overlay* bmp = SDL_CreateYUVOverlay(avcodecContext->width, avcodecContext->height, SDL_YV12_OVERLAY, screen);
    //-------SDL

    int frameFinished;
    AVPacket *packet = (AVPacket *)malloc(sizeof(AVPacket));
    int y_size = avcodecContext->width * avcodecContext->height;
    av_new_packet(packet, y_size);
    int i = 0;
    //(6)循環(huán)從Steams中 讀取出frame packet通常包含一個(gè)壓縮的Frame 音頻則可能是多個(gè)Frame
    while (av_read_frame(pFormatContext, packet) >= 0)
    {
        //stream_index Packet所在stream的index 通過這個(gè)來判斷是不是視頻幀
        if (packet->stream_index == vedioIndex)
        {
            //把視頻幀解壓到Frame中
            avcodec_decode_video2(avcodecContext, nowAVFrame, &frameFinished, packet);
            if (frameFinished)
            {
                //轉(zhuǎn)換參數(shù) 
                SwsContext *img_convert_ctx = sws_getContext(avcodecContext->width, avcodecContext->height, avcodecContext->pix_fmt, avcodecContext->width, avcodecContext->height, 
                    PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                //轉(zhuǎn)換
                sws_scale(img_convert_ctx, (const uint8_t* const*)nowAVFrame->data, nowAVFrame->linesize, 0, avcodecContext->height, nowYUVAVFrame->data, nowYUVAVFrame->linesize);

                //-------SDL
                SDL_LockYUVOverlay(bmp);
                bmp->pixels[0] = nowYUVAVFrame->data[0];
                bmp->pixels[2] = nowYUVAVFrame->data[1];
                bmp->pixels[1] = nowYUVAVFrame->data[2];
                bmp->pitches[0] = nowYUVAVFrame->linesize[0];
                bmp->pitches[2] = nowYUVAVFrame->linesize[1];
                bmp->pitches[1] = nowYUVAVFrame->linesize[2];
                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = avcodecContext->width;
                rect.h = avcodecContext->height;
                SDL_DisplayYUVOverlay(bmp, &rect);
                //延時(shí)40ms
                SDL_Delay(40);
                //-------SDL
            }
            av_free_packet(packet);
        }
    }

    av_free(buffer);
    av_free(nowAVFrame);
    av_free(nowYUVAVFrame);
    avcodec_close(avcodecContext);
    avformat_close_input(&pFormatContext);


    return 0;
}


三、Tutorial3——播放音頻

https://github.com/wlxklyh/FFMpegStudy/blob/master/win/Tutorial3
播放視頻的代碼請看Tutorial2這里只說播放音頻的

跟播放視頻不一樣的地方:

  1. 要解碼音頻 然后塞入到一個(gè)隊(duì)列
  2. 讀取音頻的是在另外一個(gè)線程的回調(diào) 所以主線程是生產(chǎn)者(生產(chǎn)packet)、音頻線程是消費(fèi)者(消耗packet)
  3. 主線程解碼音頻的過程跟解碼視頻的類似 逐幀解碼后塞入隊(duì)列packet_queue_put
  4. 回調(diào)是異步線程需要取隊(duì)列packet_queue_get 然后

音頻解碼流程:

  1. 查找音頻流
  2. 得到音頻流解碼器上下文
  3. 得到音頻解碼器
  4. 打開音頻解碼器
  5. 幀初始化
  6. 逐幀得到音頻的packet
  7. 把音頻packet塞入隊(duì)列
  8. 回調(diào)函數(shù)audio_callback
    1. 取出一幀的數(shù)據(jù):audio_decode_frame
      1. 從隊列里面取出一個packet:packet_queue_get
      2. 音頻解碼:avcodec_decode_audio4
      3. 獲取音頻buff大小:av_samples_get_buffer_size
      4. 根據(jù)buff大?。篎rame->data 取出來
    2. 往音頻線程的 stream賦值
#include "stdafx.h"
#include <fstream>

#define SDL_AUDIO_BUFFER_SIZE 1024
// Thread-safe FIFO of AVPackets shared between the demux loop in _tmain
// (producer) and the SDL audio callback thread (consumer).
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; // linked-list head and tail
    int nb_packets;                     // number of packets currently queued
    int size;                           // total bytes of queued packet payload
    SDL_mutex *mutex;                   // guards all fields above
    SDL_cond *cond;                     // signaled on enqueue; consumer waits on it
} PacketQueue;

// Global queue of demuxed (still compressed) audio packets.
PacketQueue audioq;


// Global stop flag; when non-zero, blocked queue consumers return -1.
int quit = 0;
// Initialize a PacketQueue: zero every field, then create the mutex and
// condition variable used for the producer/consumer hand-off.
void packet_queue_init(PacketQueue*q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}
// Pop one packet from the queue into *packatToPop.
// block != 0 => sleep on the condvar until a packet arrives (or quit is set).
// Returns 1 on success, 0 when empty and non-blocking, -1 when quitting.
static int packet_queue_get(PacketQueue *q, AVPacket *packatToPop, int block)
{
    int ret;

    // All queue state is touched under the mutex.
    SDL_LockMutex(q->mutex);

    for (;;) {
        // Bail out promptly when the program is shutting down.
        if (quit) {
            ret = -1;
            break;
        }

        AVPacketList *head = q->first_pkt;
        if (head != NULL) {
            // Unlink the head node and keep the tail pointer consistent.
            q->first_pkt = head->next;
            if (q->first_pkt == NULL)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= head->pkt.size;
            // Hand the payload to the caller, then free only the list node.
            *packatToPop = head->pkt;
            av_free(head);
            ret = 1;
            break;
        }

        if (!block) {
            // Empty and caller doesn't want to wait.
            ret = 0;
            break;
        }

        // Empty: release the mutex and sleep until the producer signals.
        SDL_CondWait(q->cond, q->mutex);
    }

    SDL_UnlockMutex(q->mutex);
    return ret;
}
// Append *pkt to the queue and wake one waiting consumer.
// Ownership of the packet data transfers to the queue.
// Returns 0 on success, -1 on failure (dup or allocation error).
int packet_queue_put(PacketQueue* q, AVPacket*pkt)
{
    // Duplicate a shared-buffer packet into an independently owned buffer
    // so the data stays valid after the caller reuses its packet.
    if (av_dup_packet(pkt) < 0)
    {
        return -1;
    }
    AVPacketList *pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
    if (pkt1 == NULL)
    {
        return -1;
    }
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    // Link at the tail (or as the sole element when empty).
    if (!q->last_pkt)
    {
        q->first_pkt = pkt1;
    }
    else
    {
        q->last_pkt->next = pkt1;
    }
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    // Wake a consumer blocked in packet_queue_get.
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);

    // BUG FIX: the original fell off the end of a value-returning function,
    // which is undefined behavior; callers checking the result read garbage.
    return 0;
}

// Decode audio from the global packet queue into audio_buf.
// Runs on the SDL audio thread (called from audio_callback).
// pAudioCodecCtx - audio decoder context
// audio_buf      - destination buffer for raw decoded samples
// buf_size       - capacity of audio_buf in bytes
// Returns the number of bytes written, or -1 on quit / queue failure.
int audio_decode_frame(AVCodecContext *pAudioCodecCtx, uint8_t *audio_buf, int buf_size) 
{
    // Allocated once (static initializer runs on first call) and reused.
    static AVFrame *decodedAudioFrame = avcodec_alloc_frame();
    if(decodedAudioFrame == NULL)
    {
        exit(1);
    }

    // pkt: the packet currently owned by this function (freed when drained).
    // pktTemp: a cursor over pkt's data across successive decode calls.
    static AVPacket pkt, pktTemp;
    int len1, data_size;

    while(true) 
    {
        // Drain the current packet: one packet may yield several frames.
        while (pktTemp.size > 0)
        {
            // Reset frame fields to defaults before reuse.
            avcodec_get_frame_defaults(decodedAudioFrame);
            
            int hasGotFrame = 0;
            // Decode one audio frame; len1 = bytes consumed from pktTemp
            // (negative on error).
            len1 = avcodec_decode_audio4(pAudioCodecCtx, decodedAudioFrame, &hasGotFrame, &pktTemp);


            // On decoder error, discard the rest of this packet.
            if (len1 < 0) {
                pktTemp.size = 0;
                break; // skip packet
            }

            if (hasGotFrame) {
                printf("\nGot frame!");

                // Byte size of this frame's decoded samples.
                data_size = av_samples_get_buffer_size(NULL, pAudioCodecCtx->channels,
                    decodedAudioFrame->nb_samples,
                    pAudioCodecCtx->sample_fmt, 1);
                // Clamp to the caller's buffer capacity.
                if (data_size > buf_size) {
                    data_size = buf_size;
                }
                memcpy(audio_buf, decodedAudioFrame->data[0], data_size);

            }
            else {
                data_size = 0;
            }

            printf("\nData size %d", data_size);
            // Advance the cursor past the bytes the decoder consumed.
            pktTemp.data += len1;
            pktTemp.size -= len1;

            // Nothing usable decoded yet; keep draining this packet.
            if (data_size <= 0) {
                continue;
            }

            return data_size;
        }


        // Previous packet fully consumed: release it before fetching another.
        if (pkt.data)
            av_free_packet(&pkt);

        if (quit)
        {
            return -1;
        }

        // Blocking dequeue: on this audio thread we wait on the queue's
        // condvar until the producer (main thread) enqueues a packet.
        if (packet_queue_get(&audioq, &pkt, 1) < 0)
        {
            return -1;
        }


        av_init_packet(&pktTemp);

        // Point the cursor at the freshly dequeued packet's data.
        pktTemp.data = pkt.data;
        pktTemp.size = pkt.size;
    }
}

//音頻線程回調(diào)
void audio_callback(void *userdata, Uint8 *stream, int len) {

    //回調(diào)傳入的解碼器上下文
    AVCodecContext *pAudioCodecCtx = (AVCodecContext *)userdata;
    int len1, audio_size;

    static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;

    while (len > 0) {
        /// audio
        if (audio_buf_index >= audio_buf_size) {
            //從隊(duì)列里面讀取出解碼后的音頻數(shù)據(jù)
            audio_size = audio_decode_frame(pAudioCodecCtx, audio_buf, sizeof(audio_buf));
            if (audio_size < 0) {
                /* If error, output silence */
                audio_buf_size = 1024; // arbitrary?
                memset(audio_buf, 0, audio_buf_size);
            }
            else {
                audio_buf_size = audio_size;
            }
            audio_buf_index = 0;
        }

        len1 = audio_buf_size - audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}

int _tmain(int args,_TCHAR* argv[])
{
    //視頻文件名字
    char videoFilePath[] = "nwn.mp4";

    //(1)這里注冊了所有的文件格式和編解碼器的庫 所以他們將被自動的使用在被打開的合適格式的文件  需要注冊一次
    av_register_all();
    avformat_network_init();
    AVFormatContext *pFormatCtx = avformat_alloc_context();//這句應(yīng)該是最新的FFMpeg的要求

    //(2)打開文件
    if(avformat_open_input(&pFormatCtx,videoFilePath,NULL,NULL)!=0)
    {
        //打開失敗
        return -1;
    }
    //打印視頻文件信息
    if(av_find_stream_info(pFormatCtx) < 0)
    {
        return -1;
    }
    av_dump_format(pFormatCtx, 0, videoFilePath, 0);

    //(3)查找視頻流、音頻流
    int vedioIndex = -1;
    int audioIndex = -1;
    for(int i=0;i<pFormatCtx->nb_streams;i++)
    {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            vedioIndex = i;
            continue;
        }
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            audioIndex = i;
            continue;
        }
    }
    if(vedioIndex == -1 || audioIndex == -1)
    {
        //找不到視頻流
        return -1;
    }

    //(4)得到視頻流的解碼器
    AVCodec *avVideoCodec, *avAudioCodec;
    AVCodecContext *avVideoCodecCtx,*avAudioCodecCtx;
    
    avVideoCodecCtx = pFormatCtx->streams[vedioIndex]->codec;
    avAudioCodecCtx = pFormatCtx->streams[audioIndex]->codec;

    avVideoCodec = avcodec_find_decoder(avVideoCodecCtx->codec_id);
    avAudioCodec = avcodec_find_decoder(avAudioCodecCtx->codec_id);
    if(avVideoCodec == NULL|| avAudioCodec==NULL)
    {
        return -1;
    }
    //打開視頻解碼器 打開音頻解碼器
    if(avcodec_open2(avVideoCodecCtx,avVideoCodec,NULL) < 0)
    {
        return -1;
    }
    if (avcodec_open2(avAudioCodecCtx, avAudioCodec, NULL) < 0)
    {
        return -1;
    }

    packet_queue_init(&audioq);
    
    //初始化音頻流的SDL  
    SDL_AudioSpec   wanted_spec, spec;
    wanted_spec.freq = avAudioCodecCtx->sample_rate;                //采樣率:
    wanted_spec.format = AUDIO_S16SYS;                              //格式:S16SYS signed 每個(gè)樣本16位 SYS代表大小端跟系統(tǒng)一樣
    wanted_spec.channels = avAudioCodecCtx->channels;               //通道數(shù)
    wanted_spec.silence = 0;                                        //靜音的值 0代表靜音 
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;                    //音頻緩沖區(qū)的尺寸
    wanted_spec.callback = audio_callback;                          //回調(diào)函數(shù)
    wanted_spec.userdata = avAudioCodecCtx;                         //
    if (SDL_OpenAudio(&wanted_spec, &spec) < 0)
    {
        return -1;
    }


    //這個(gè)函數(shù)一定要在SDL_OpenAudio之后調(diào)用 可以讓你安全的初始化回調(diào)函數(shù)
    SDL_PauseAudio(0);



    //(5)幀初始化
    AVFrame *nowAVFrame = avcodec_alloc_frame();
    AVFrame *nowYUVAVFrame = avcodec_alloc_frame();
    if(nowAVFrame == NULL || nowYUVAVFrame == NULL)
    {
        return -1;
    }
    int numBytes = avpicture_get_size(PIX_FMT_YUV420P, avVideoCodecCtx->width, avVideoCodecCtx->height);
    uint8_t *buffer = (uint8_t*)av_malloc(numBytes*sizeof(uint8_t));
    avpicture_fill((AVPicture*)nowYUVAVFrame, buffer, PIX_FMT_YUV420P, avVideoCodecCtx->width, avVideoCodecCtx->height);


    //(6)SDL
    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
    {
        return -1;
    }
    SDL_Rect rect;
    SDL_Surface *screen = SDL_SetVideoMode(avVideoCodecCtx->width, avVideoCodecCtx->height, 0, 0);
    SDL_Overlay *bitmap = SDL_CreateYUVOverlay(avVideoCodecCtx->width, avVideoCodecCtx->height, SDL_YV12_OVERLAY, screen);

    //初始化一個(gè)Packet
    int frameFinished;
    AVPacket *packet = (AVPacket*)av_malloc(sizeof(AVPacket));
    int YSize = avVideoCodecCtx->width*avVideoCodecCtx->height;
    av_new_packet(packet, YSize);

    //(7)循環(huán)的讀幀
    while(av_read_frame(pFormatCtx,packet) >= 0)
    {
        if(packet->stream_index ==vedioIndex)
        {
            avcodec_decode_video2(avVideoCodecCtx, nowAVFrame, &frameFinished, packet);
            if(frameFinished)
            {
                SwsContext *imageConvertCtx = sws_getContext(avVideoCodecCtx->width, avVideoCodecCtx->height, avVideoCodecCtx->pix_fmt, avVideoCodecCtx->width, avVideoCodecCtx->height,
                    PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                sws_scale(imageConvertCtx, (const uint8_t* const*)nowAVFrame->data, nowAVFrame->linesize, 0, avVideoCodecCtx->height, nowYUVAVFrame->data, nowYUVAVFrame->linesize);

                SDL_LockYUVOverlay(bitmap);
                bitmap->pixels[0] = nowYUVAVFrame->data[0];
                bitmap->pixels[2] = nowYUVAVFrame->data[1];
                bitmap->pixels[1] = nowYUVAVFrame->data[2];
                bitmap->pitches[0] = nowYUVAVFrame->linesize[0];
                bitmap->pitches[2] = nowYUVAVFrame->linesize[1];
                bitmap->pitches[1] = nowYUVAVFrame->linesize[2];
                SDL_UnlockYUVOverlay(bitmap);

                rect.x = 0;
                rect.y = 0;
                rect.w = avVideoCodecCtx->width;
                rect.h = avVideoCodecCtx->height;

                SDL_DisplayYUVOverlay(bitmap, &rect);
                //這里可以設(shè)置一下 讓視頻音頻看起來同步 
                SDL_Delay(0);
            }
            
        }
        else if(packet->stream_index == audioIndex)
        {
            packet_queue_put(&audioq,packet);
        }
        else
        {
            av_free_packet(packet);
        }
    }

    //釋放內(nèi)存
    av_free(buffer);
    av_free(nowAVFrame);
    av_free(nowAVFrame);
    avcodec_close(avVideoCodecCtx);
    avformat_close_input(&pFormatCtx);
}

四、Tutorial4、5、6、7——?jiǎng)?chuàng)建線程、同步視頻、同步音頻、快進(jìn)

先看著代碼:
https://github.com/wlxklyh/FFMpegStudy/blob/master/win/Tutorial456

引用

1、雷神的FFMpeg最小demo
https://blog.csdn.net/leixiaohua1020/article/details/10528443

2、FFmpeg Tutorial.pdf 中文翻譯文檔
https://github.com/wlxklyh/FFMpegStudy/blob/master/FFmpeg%20Tutorial.pdf

3、視頻連接:

后面學(xué)習(xí)FFMpeg(win)都從這里拷貝出來 不用管環(huán)境和頭文件的問題。

歡迎加微信討論(備注Gituhb)

image

?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時(shí)請結(jié)合常識與多方信息審慎甄別。
平臺聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點(diǎn),簡書系信息發(fā)布平臺,僅提供信息存儲服務(wù)。

友情鏈接更多精彩內(nèi)容