概要:就是上一章解碼的一個逆過程:yuv420編碼為H264格式。
代碼倉庫:https://github.com/wulang150/FFmpegTest.git
代碼文件:EncoderViewController.m
全部代碼:
#import "EncoderViewController.h"
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
@interface EncoderViewController ()
{
// Reusable packet that receives encoder output; allocated once in mainFunc,
// unreffed after each use in yuv420ToH264 and freed at the end of mainFunc.
AVPacket *pkt;
// Reusable YUV420P frame that raw file data is copied into before encoding.
AVFrame *frame;
// H.264 encoder context created by findEncoder: and freed in mainFunc.
AVCodecContext *codecCtx;
// AVCodec *codec;
// Running total of encoded bytes produced, reported (in KB) at the end of mainFunc.
NSInteger allSize;
}
@end
@implementation EncoderViewController
- (void)viewDidLoad {
    [super viewDidLoad];
    // Minimal appearance setup for this demo screen.
    self.title = @"編碼";
    self.view.backgroundColor = [UIColor whiteColor];
}
// Kicks off the whole encode pipeline when the user taps anywhere on the view.
- (void)touchesBegan:(NSSet<UITouch *> *)touches withEvent:(UIEvent *)event{
[self mainFunc];
}
// Reads a raw YUV420P file frame by frame, encodes it to H.264 with libavcodec,
// and writes the resulting Annex-B byte stream to Documents/movie.h264.
- (void)mainFunc{
    // Register muxers/codecs (required on FFmpeg < 4.0; harmless no-ops later).
    av_register_all();
    avcodec_register_all();
    // Reset the running byte counter so repeated taps report this run only.
    allSize = 0;

    NSString *filePath = [CommonFunc getDefaultPath:@"movie.h264"];
    const char *filename = [filePath cStringUsingEncoding:NSASCIIStringEncoding];
    // Destination file for the encoded stream.
    FILE *f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        return; // was exit(1): never terminate the whole app from a demo action
    }
    // Resolution of the source YUV file; H.264 requires even dimensions.
    int width = 1600, height = 1200;

    pkt = av_packet_alloc();
    if (!pkt) {
        fclose(f);
        return;
    }
    frame = alloc_picture(AV_PIX_FMT_YUV420P, width, height);
    if (frame == NULL) {
        fclose(f);
        av_packet_free(&pkt);
        return;
    }
    // Create and open the H.264 encoder configured from the frame's parameters.
    codecCtx = [self findEncoder:frame];
    if (codecCtx == NULL) {
        fclose(f);
        av_frame_free(&frame);
        av_packet_free(&pkt);
        return;
    }

    // Push every YUV frame through the encoder, appending packets to the file.
    [self readYUV:width height:height callback:^(AVFrame *tframe) {
        [self yuv420ToH264:tframe codeCtx:self->codecCtx callBack:^(AVPacket *pkt) {
            fwrite(pkt->data, 1, pkt->size, f);
        }];
    }];
    NSLog(@">>>>>begin!!!!!");
    // Flush: sending NULL drains the packets still buffered inside the encoder.
    [self yuv420ToH264:NULL codeCtx:codecCtx callBack:^(AVPacket *pkt) {
        fwrite(pkt->data, 1, pkt->size, f);
    }];
    NSLog(@">>>>>end allSize=%.2fKB",allSize/1024.0);

    // Release everything in reverse order of acquisition.
    fclose(f);
    avcodec_free_context(&codecCtx);
    av_frame_free(&frame);
    av_packet_free(&pkt);
}
// Reads the raw YUV420P file one picture at a time into the instance `frame`
// and invokes `callBack` for each complete picture, assigning sequential pts.
// width/height must match both the file contents and the allocated frame.
- (void)readYUV:(int)width height:(int)height callback:(void(^)(AVFrame *tframe))callBack{
    NSString *yuvPath = [CommonFunc getDocumentWithFile:@"11_23_07_movie.yuv"];
    if(![[NSFileManager defaultManager] fileExistsAtPath:yuvPath]){
        NSLog(@"file error!");
        return;
    }
    // Read-only is sufficient; "rb+" would fail on read-only volumes.
    FILE *fp = fopen([yuvPath UTF8String], "rb");
    if (!fp) { // fopen can still fail even after the existence check
        NSLog(@"file error!");
        return;
    }
    // One YUV420P picture = Y (w*h) + U (w*h/4) + V (w*h/4) bytes.
    size_t frameSize = (size_t)width * height * 3 / 2;
    unsigned char *pic = (unsigned char *)malloc(frameSize);
    if (!pic) {
        fclose(fp);
        return;
    }
    int i = 0;
    // Stop on the first short read (EOF or truncated trailing picture).
    while (fread(pic, 1, frameSize, fp) == frameSize) {
        // The encoder may still hold a reference to the frame's buffers from
        // the previous send; make sure we are not scribbling over them.
        if (av_frame_make_writable(frame) < 0) {
            break;
        }
        // Copy plane by plane, row by row: av_frame_get_buffer may pad each
        // row, so frame->linesize[n] is not guaranteed to equal the row width.
        const unsigned char *srcY = pic;
        const unsigned char *srcU = pic + (size_t)width * height;
        const unsigned char *srcV = srcU + (size_t)width * height / 4;
        for (int row = 0; row < height; row++) {
            memcpy(frame->data[0] + row * frame->linesize[0], srcY + (size_t)row * width, width);
        }
        for (int row = 0; row < height / 2; row++) {
            memcpy(frame->data[1] + row * frame->linesize[1], srcU + (size_t)row * (width / 2), width / 2);
            memcpy(frame->data[2] + row * frame->linesize[2], srcV + (size_t)row * (width / 2), width / 2);
        }
        // Monotonic pts in time_base units (1/15 s per tick, see findEncoder:).
        frame->pts = i++;
        if(callBack){
            callBack(frame);
        }
    }
    free(pic);
    fclose(fp);
}
// Allocates an AVFrame plus its pixel buffers for the given format/size.
// Returns NULL on failure (nothing is leaked in that case).
AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;
    picture = av_frame_alloc();
    if (!picture)
        return NULL;
    picture->format = pix_fmt;
    picture->width = width;
    picture->height = height;
    /* allocate the buffers for the frame data; rows padded to 32 bytes */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        av_frame_free(&picture); // fix: don't leak the frame shell on failure
        return NULL;
    }
    return picture;
}
//找到生產(chǎn)對應(yīng)的編碼器
// Finds the H.264 encoder, configures a context from the frame's parameters
// (resolution and pixel format), opens it, and returns it.
// Returns NULL on any failure; the caller owns the returned context and must
// release it with avcodec_free_context().
- (AVCodecContext *)findEncoder:(AVFrame *)frame{
    AVCodecContext *c = NULL;
    int ret = -1;
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        return c;
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        return c;
    }
    /* put sample parameters */
    c->bit_rate = 900000;
    /* resolution must be a multiple of two */
    c->width = frame->width;
    c->height = frame->height;
    /* frames per second: pts is counted in 1/15 s ticks (see readYUV) */
    c->time_base = (AVRational){1, 15};
    // c->framerate = (AVRational){15, 1};
    /* emit one intra frame every `gop_size` frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    c->gop_size = 30;
    // No B-frames: keeps pts == dts so the raw stream plays without reordering.
    c->max_b_frames = 0;
    c->pix_fmt = frame->format;
    // if (codec->id == AV_CODEC_ID_H264)
    //     av_opt_set(c->priv_data, "preset", "slow", 0);
    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
        avcodec_free_context(&c); // fix: don't leak the context on open failure
        return NULL;
    }
    return c;
}
// Encodes one YUV420P frame to H.264, or flushes the encoder when `frame`
// is NULL. Every finished packet is passed to `callBack` before being
// unreferenced; the instance-level `pkt` is reused as the receive buffer.
- (void)yuv420ToH264:(AVFrame *)frame codeCtx:(AVCodecContext *)codecCtx callBack:(void(^)(AVPacket *enPkt))callBack{
if(codecCtx==NULL){
return;
}
int ret;
/* send the frame to the encoder; a NULL frame enters drain (flush) mode */
ret = avcodec_send_frame(codecCtx, frame);
if (ret < 0) {
fprintf(stderr, "Error sending a frame for encoding\n");
return;
}
if (frame)
printf("Send frame %3"PRId64"\n", frame->pts);
// Drain every packet the encoder has ready; one sent frame may yield zero,
// one, or (when flushing) many packets.
while (ret >= 0) {
ret = avcodec_receive_packet(codecCtx, pkt);
// EAGAIN: the encoder needs more input; EOF: flush complete. Neither is an error.
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return;
else if (ret < 0) {
fprintf(stderr, "Error during encoding\n");
return;
}
// pkt->pts = pkt->dts = pkt->pts * (codecCtx->time_base.den) /codecCtx->time_base.num / 15;
printf("Write packet pts=%lld dts=%lld (size=%5d)\n", pkt->pts, pkt->dts, pkt->size);
// Track total output size for the summary log in mainFunc.
allSize += pkt->size;
if(callBack){
callBack(pkt);
}
// Release the packet's data before reusing it on the next iteration.
av_packet_unref(pkt);
}
}
@end
上一篇,我們得到了解碼后的yuv420格式的文件,這次就得去讀取那個文件,然后進(jìn)行編碼。
一、yuv420讀取為AVFrame
FILE *fp=fopen([yuvPath UTF8String],"rb+");
unsigned char *pic=(unsigned char *)malloc(width*height*3/2);
int i = 0;
while (true)
{
unsigned long ret = fread(pic,1,width*height*3/2,fp);
if(ret<width*height*3/2){
break;
}
memcpy(frame->data[0], pic, width*height);
// frame->linesize[0] = width;
memcpy(frame->data[1], pic+width*height, width*height/4);
// frame->linesize[1] = width/2;
memcpy(frame->data[2], pic+width*height*5/4, width*height/4);
// frame->linesize[2] = width/2;
frame->pts = i++;
if(callBack){
callBack(frame);
}
}
上面可以看到,就是讀取每一個yuv圖片數(shù)據(jù),然后分別寫入
AVFrame->data[0](Y)
AVFrame->data[1](U)
AVFrame->data[2](V)
然后自加AVFrame->pts
二、初始化編碼器
//得到編碼器
AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
//得到編碼上下文
c = avcodec_alloc_context3(codec);
//設(shè)置碼流
c->bit_rate = 900000;
//設(shè)置分辨率
c->width = frame->width;
c->height = frame->height;
//設(shè)置timebase
c->time_base = (AVRational){1, 15};
//設(shè)置編碼前的格式
c->pix_fmt = frame->format;
//打開編碼器
ret = avcodec_open2(c, codec, NULL);
上面寫到的,都是必須配置的,還有一些其他參數(shù),可以根據(jù)需求配置。
三、進(jìn)行編碼
ret = avcodec_send_frame(codecCtx, frame);
ret = avcodec_receive_packet(codecCtx, pkt);
av_packet_unref(pkt);
跟解碼的類似。需要注意的是,本例中pkt是成員變量,在mainFunc里統(tǒng)一分配、統(tǒng)一釋放;如果把它定義為yuv420ToH264里的局部變量,就必須每次調(diào)用都配套av_packet_alloc和av_packet_free,否則會內(nèi)存泄漏。
[self yuv420ToH264:NULL codeCtx:codecCtx callBack:^(AVPacket *pkt) {
fwrite(pkt->data, 1, pkt->size, f);
}];
最后還得傳空值,讓編碼器把剩下的給輸出出來:
四、驗證
只需用ffplay來播放就可以了
ffplay 11_26_30_movie.h264
或者轉(zhuǎn)為mp4文件
ffmpeg -i 09_58_11_movie.h264 -f mp4 my.mp4
如果可以正常播放,不花屏,就證明編碼成功了。
寫到這里,突然產(chǎn)生了一個疑問,如果編碼後得到的AVPacket為一幀編碼後的數(shù)據(jù),我是直接寫到文件的。那么ffplay怎么去區(qū)分每一幀的呢?我看了下內(nèi)存數(shù)據(jù),每一幀是以00000001開頭的。這其實就是H.264 Annex B字節(jié)流格式的起始碼(start code,00 00 00 01或00 00 01):解碼器正是靠掃描起始碼來切分出一個個NAL單元(可能是一幀/片的數(shù)據(jù),也可能是SPS、PPS等參數(shù)集),所以裸流文件不需要額外的封裝信息,ffplay也能直接播放。
五、改變大小
如果我想改變編碼后的大小,第一反應(yīng)就是去修改上面提到的碼流值:c->bit_rate = 900000。
其實,影響編碼大小的,不只是碼流,還跟timebase有關(guān)。說白了,ffmpeg就是根據(jù)碼流和timebase來計算每一幀大概的編碼大小。
如果想很好地改變編碼后的大小,可以按下面的步驟:
(1)、得到正確的timebase的值
如果你發(fā)現(xiàn)通過修改bitrate后,編碼后的大小變化不大。那么可能是timebase值設(shè)置不當(dāng)。那么怎么配置正確的timebase呢?可以根據(jù)AVFrame中的timebase,具體你可以看它的pts,dts,duration值,推斷大概的timebase。
比如:
你以幀率為15來算,那么每一幀的時間為1/15秒
1、比如第二幀 pts為1,那么1xtimebase = 1/15,所以timebase為(AVRational){1, 15}
2、比如第二幀pts為1024,同理1024xtimebase = 1/15,所以timebase為1/15除以1024,所以為(AVRational){1, 15360}
(2)、改變bitrate
設(shè)置好timebase后,就可以通過改變bitrate得到想要的大小的。比如碼流為900000,我改為450000,大小就接近減少一半。當(dāng)然也不會無限小,也不會無限大。壓縮都是有個量的。