最近需要做實(shí)時(shí)錄屏并把視頻推流到RTSP服務(wù)器,具體流程是抓取屏幕內(nèi)容(bitmap),并把bitmap轉(zhuǎn)化為YUV,接著把YUV編碼成H264,再把H264碼流推到RTSP服務(wù)器;把采集到的PCM編碼為AAC,再把AAC推流至RTSP服務(wù)器。

看了雷神的一篇文章:最簡單的基于FFmpeg的推流器(以推送RTMP為例),他是把本地的視頻文件推流至RTMP服務(wù)器,并不符合我的要求。
接著我找到另一篇文章:ffmpeg實(shí)現(xiàn)H264壓縮并且推流至RTSP,這篇文章只有圖像編碼,并沒有音頻編碼,并且推流之后并沒有播放成功。
我綜合上面兩位大佬的思路,和查找一些資料實(shí)現(xiàn)了這個(gè)功能。
RTSP服務(wù)器使用的是HappyTime的免費(fèi)試用版本。
1. bitmap轉(zhuǎn)YUV
我抓到的bitmap是BGRA格式的,所以使用的圖像格式是AV_PIX_FMT_BGRA,cropImage是含有BGRA圖像數(shù)據(jù)的數(shù)組
bool init_RGB_to_YUV(){
//BGRA 轉(zhuǎn) YUV
swrCtxBGRA2YUV = sws_getContext(
cap_w, cap_h, AV_PIX_FMT_BGRA,
cap_w, cap_h, AV_PIX_FMT_YUV420P,
SWS_BICUBIC,
NULL, NULL, NULL
);
//創(chuàng)建BGRA幀
frame_bgra = av_frame_alloc();
frame_bgra->format = AV_PIX_FMT_BGRA;
frame_bgra->width = cap_w;
frame_bgra->height = cap_h;
if (av_frame_get_buffer(frame_bgra, 32) < 0) {
printf("Failed: av_frame_get_buffer\n");
return false;
}
frame_bgra->data[0] = cropImage;
//YUV幀
frame_yuv = av_frame_alloc();
frame_yuv->width = cap_w;
frame_yuv->height = cap_h;
frame_yuv->format = AV_PIX_FMT_YUV420P;
//
uint8_t *picture_buf = (uint8_t *)av_malloc(cap_w * cap_h * 1.5);
if (av_image_fill_arrays(frame_yuv->data, frame_yuv->linesize, picture_buf, AV_PIX_FMT_YUV420P, cap_w, cap_h, 1) < 0){
printf("Failed: av_image_fill_arrays\n");
return false;
}
return true;
}
調(diào)用:
//BGRA -> YUV420P conversion of the captured frame.
//(sws_scale returns the output slice height; a negative value means failure.)
if (sws_scale(swrCtxBGRA2YUV,
frame_bgra->data, frame_bgra->linesize,
0, cap_h,
frame_yuv->data, frame_yuv->linesize) < 0)
{
printf("失?。築GRA 轉(zhuǎn) YUV\n");
return;
}
//NOTE(review): av_gettime() is microseconds, but the encoder time_base is
//{1,25}; proper usage would rescale with av_rescale_q() to encoder ticks —
//confirm how the downstream muxer interprets these timestamps.
frame_yuv->pts = av_gettime();
由于我是實(shí)時(shí)抓取的屏幕,frame_yuv->pts設(shè)為當(dāng)前的時(shí)間戳,以保證能正常播放。
2. H264編碼
bool init_YUV_to_H264(){
    // Create and open the H.264 (x264) encoder for the captured screen frames.
    // Returns false (after printing a message) on any failure.
    codec_h264 = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec_h264){
        printf("Fail: avcodec_find_encoder\n");
        return false;
    }
    codec_ctx_h264 = avcodec_alloc_context3(codec_h264);
    if (!codec_ctx_h264){
        printf("Fail: avcodec_alloc_context3\n");
        return false;
    }
    codec_ctx_h264->pix_fmt = AV_PIX_FMT_YUV420P;
    codec_ctx_h264->codec_type = AVMEDIA_TYPE_VIDEO;
    codec_ctx_h264->width = cap_w;
    codec_ctx_h264->height = cap_h;
    // FIX: removed "channels = 3" — AVCodecContext::channels is an *audio*
    // field (number of audio channels) and is meaningless on a video encoder.
    codec_ctx_h264->time_base = { 1, 25 };   // 25 fps capture
    codec_ctx_h264->framerate = { 25, 1 };
    codec_ctx_h264->gop_size = 5;            // distance between two key frames (I-frames)
    codec_ctx_h264->max_b_frames = 0;        // no B-frames -> lower latency
    //codec_ctx_h264->qcompress = 0.6;
    //codec_ctx_h264->bit_rate = 90000;
    codec_ctx_h264->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; // put SPS/PPS in extradata
    av_opt_set(codec_ctx_h264->priv_data, "preset", "ultrafast", 0); // fast encode, lower quality
    //av_opt_set(codec_ctx_h264->priv_data, "tune", "zerolatency", 0); // low-latency tuning, but caused green frames
    //av_opt_set(codec_ctx_h264->priv_data, "x264opts", "crf=26:vbv-maxrate=728:vbv-bufsize=3640:keyint=25", 0);
    if (avcodec_open2(codec_ctx_h264, codec_h264, NULL) < 0){
        printf("Fail: avcodec_open2\n");
        return false;
    }
    // Reusable packet for the encoded H.264 output.
    pkt_h264 = av_packet_alloc();
    if (!pkt_h264) { // FIX: result was previously unchecked
        printf("Fail: av_packet_alloc\n");
        return false;
    }
    return true;
}
調(diào)用:
//Feed one YUV frame to the H.264 encoder, then drain every packet it
//has ready and mux each one into the RTSP output.
ret = avcodec_send_frame(codec_ctx_h264, frame_yuv);
if (ret < 0){
printf("send frame fail\n");
return;
}
while (ret >= 0) {
ret = avcodec_receive_packet(codec_ctx_h264, pkt_h264);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
//EAGAIN: encoder needs more input; EOF: fully flushed. Both are normal exits.
break;
}
if (ret < 0){
printf("Error during encoding\n");
break;
}
//Tag the packet with the output stream index before muxing.
pkt_h264->stream_index = videoindex;
//NOTE(review): packet timestamps are still in the encoder time_base; they
//are not rescaled to the muxer stream's time_base here (av_packet_rescale_ts)
//— confirm the stream time_base is unchanged after avformat_write_header().
//printf("pkt_h264 timestamp = %d\n", pkt_h264->pts);
if (av_interleaved_write_frame(fmt_ctx, pkt_h264) < 0) {
printf("Error muxing packet\n");
}
av_packet_unref(pkt_h264);
}
3. AAC編碼
bool init_PCM_to_AAC(){
codec_aac = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!codec_aac) {
printf("avcodec_find_encoder fail\n");
return false;
}
codec_ctx_aac = avcodec_alloc_context3(codec_aac);
if (!codec_ctx_aac) {
printf("avcodec_find_encoder fail\n");
return false;
}
codec_ctx_aac->sample_fmt = AV_SAMPLE_FMT_FLT;
codec_ctx_aac->codec_type = AVMEDIA_TYPE_AUDIO;
codec_ctx_aac->channels = channels;
codec_ctx_aac->channel_layout = av_get_default_channel_layout(channels);
codec_ctx_aac->sample_rate = sample_rete;
if (avcodec_open2(codec_ctx_aac, codec_aac, NULL) < 0) {
printf("open codec fail\n");
return false;
}
swrCtxS162FLT = swr_alloc_set_opts(NULL,
codec_ctx_aac->channel_layout, codec_ctx_aac->sample_fmt, codec_ctx_aac->sample_rate,
codec_ctx_aac->channel_layout, AV_SAMPLE_FMT_S16, codec_ctx_aac->sample_rate,
0, 0);
if (!swrCtxS162FLT)
{
printf("swr_alloc_set_opts error\n");
return false;
}
if (swr_init(swrCtxS162FLT) < 0) {
printf("open resample fail\n");
return false;
}
frame_pcm = av_frame_alloc();
frame_pcm->nb_samples = nbSamples_; //一幀音頻存放的樣本數(shù)量
frame_pcm->format = codec_ctx_aac->sample_fmt;
frame_pcm->channels = codec_ctx_aac->channels;
frame_pcm->channel_layout = codec_ctx_aac->channel_layout;
if (av_frame_get_buffer(frame_pcm, 0) < 0) {
printf("av_frame_get_buffer error\n");
return false;
}
pkt_aac = av_packet_alloc();
return true;
}
調(diào)用:
其中pcm_buff是包含pcm數(shù)據(jù)的數(shù)組
const uint8_t *pcm[1];
pcm[0] = pcm_buff;
int len = swr_convert(swrCtxS162FLT,
frame_pcm->data, frame_pcm->nb_samples,
pcm, nbSamples_);
if (len <= 0) {
printf("---Encodec:PCM->AAC--- swr_convert fail \n");
return;
}
frame_pcm->pts = av_gettime();
//printf("channels = %d\n", frame_pcm->channels);
//printf("framePCM->linesize = %6d %6d\n", frame_pcm->linesize[0], frame_pcm->linesize[1]);
//AAC編碼
int ret = avcodec_send_frame(codec_ctx_aac, frame_pcm);
if (ret < 0){
printf("send frame fail\n");
return;
}
ret = avcodec_receive_packet(codec_ctx_aac, pkt_aac);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF){
return;
}
if (ret < 0){
printf("Error during encoding\n");
return;
}
pkt_aac->stream_index = audioindex;
//printf("pkt_aac timestamp = %d\n", pkt_aac->pts);
if (av_interleaved_write_frame(fmt_ctx, pkt_aac) < 0) {
printf("Error muxing packet\n");
}
av_packet_unref(pkt_aac);
4. 推流器
使用UDP傳輸時(shí)傳到1400多幀就斷開連接了,原因不明,所以改用TCP協(xié)議傳輸
bool init_rtsp_pusher(){
    // Create the RTSP output context with one video and one audio stream,
    // then open the output transport. Stream parameters are copied from the
    // already-opened encoder contexts via avcodec_parameters_from_context();
    // FIX: the deprecated avcodec_copy_context()/AVStream::codec path was
    // removed — it duplicated the codecpar copy and mixed old and new APIs.
    if (avformat_alloc_output_context2(&fmt_ctx, NULL, "RTSP", RTSP_URL.c_str()) < 0){
        printf("Fail: avformat_alloc_output_context2\n");
        return false;
    }
    // Interleave over TCP; UDP dropped the connection after ~1400 frames.
    av_opt_set(fmt_ctx->priv_data, "rtsp_transport", "tcp", 0);
    // Wait at most this many microseconds for data on every stream before
    // forcing interleaved output.
    fmt_ctx->max_interleave_delta = 1000000;

    // ---- video stream ----
    AVStream *video_s = avformat_new_stream(fmt_ctx, codec_h264);
    if (!video_s){
        printf("Fail: avformat_new_stream\n");
        return false;
    }
    video_s->time_base = { 1, 25 };
    videoindex = video_s->id = fmt_ctx->nb_streams - 1; // index of the newly added stream
    if (avcodec_parameters_from_context(video_s->codecpar, codec_ctx_h264) < 0) {
        printf("Fail: avcodec_parameters_from_context (video)\n");
        return false;
    }
    video_s->codecpar->codec_tag = 0; // let the muxer choose the tag

    // ---- audio stream ----
    AVStream *audio_s = avformat_new_stream(fmt_ctx, codec_ctx_aac->codec);
    if (!audio_s){
        printf("Fail: avformat_new_stream\n");
        return false;
    }
    // FIX: audio timestamps tick in samples, not video frames; {1,25} was a
    // copy-paste from the video stream.
    audio_s->time_base = { 1, codec_ctx_aac->sample_rate };
    audioindex = audio_s->id = fmt_ctx->nb_streams - 1;
    if (avcodec_parameters_from_context(audio_s->codecpar, codec_ctx_aac) < 0) {
        printf("Fail: avcodec_parameters_from_context (audio)\n");
        return false;
    }
    audio_s->codecpar->codec_tag = 0;

    //printf("fmt_ctx nb_streams = %d\n", fmt_ctx->nb_streams);
    av_dump_format(fmt_ctx, 0, fmt_ctx->filename, 1);
    // The RTSP muxer sets AVFMT_NOFILE (it opens its own network transport),
    // so avio_open() is normally skipped; the guard keeps this code correct
    // for file-based output formats as well.
    if (!(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&fmt_ctx->pb, fmt_ctx->filename, AVIO_FLAG_WRITE) < 0) {
            printf("Fail: avio_open('%s')\n", fmt_ctx->filename);
            return false;
        }
    }
    return true;
}
結(jié)果

延遲有1.5秒左右
參考:
https://blog.csdn.net/leixiaohua1020/article/details/39803457
https://blog.csdn.net/yunge812/article/details/79345584
https://trac.ffmpeg.org/wiki