相關(guān)
視頻疊加算法-白色素材疊加
視頻疊加算法-彩色素材疊加
視頻疊加算法-彩色加亮融合
視頻疊加算法-彩色均值融合
引言
如果想在視頻之上疊加一個靜止圖片很簡單,像ffmpeg的濾鏡、opencv等都能實現(xiàn)。但是假如文字擁有動畫,而且文字出現(xiàn)比較頻繁,全部使用序列的png圖像會很大。例如如下的素材:

黑色素材
雖然與白色素材疊加算法中所用素材相同,但目的不同,以下demo將素材以“黑色部分疊加,白色區(qū)域透明”的效果疊加到視頻之上。
算法實現(xiàn)
原視頻:

input
#include <stdio.h>
#include <stdlib.h> /* malloc/free — previously used without a prototype */
#include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixfmt.h>
#ifdef __cplusplus
};
#endif
/* Overlay the black parts of cover_frame onto src_frame, writing into dst_frame (all YUV420P). */
int frame_cover_black( AVFrame* dst_frame, AVFrame* src_frame, AVFrame* cover_frame);
/* Attach dst_buff to frame as a width x height YUV420P picture; returns 0 on success. */
int init_frame(AVFrame* frame,int width,int height,uint8_t* dst_buff);
/* Read raw frames from `file` and `light`, merge them, write raw output to `lightout`. */
int mergeyuv(char* file, char* light,char* lightout,int width,int height);
/* Write one YUV420P frame to `out` plane by plane, honoring each plane's stride. */
int write_yuvframe(AVFrame *pFrame,FILE *out);
/*
 * Entry point: overlays the black-material clip "light.yuv" onto
 * "test.yuv" and writes the merged raw YUV420P stream to "lightout.yuv".
 * Frame size is hard-coded to 480x480.
 *
 * BUG FIX: the original declared `main(char** args, int argv)` — the
 * parameter order (and names) were swapped; the C standard form is
 * `int main(int argc, char** argv)`.
 */
int main(int argc, char** argv)
{
    (void)argc;
    (void)argv;
    char* file = "test.yuv";
    char* light = "light.yuv";
    char* lightout = "lightout.yuv";
    int width = 480;
    int height = 480;
    mergeyuv(file, light, lightout, width, height);
    return 0;
}
/*
 * Reads raw YUV420P frames of size width x height from `file` (main video)
 * and `light` (black material), overlays the material onto each main frame
 * via frame_cover_black(), and appends the merged frames to `lightout`.
 * Stops at the shorter of the two inputs.
 * Returns 0 on success, -1 if any open or allocation fails.
 *
 * BUG FIXES vs. original:
 *  - The NULL check tested `readframe` instead of `readfile`, so a missing
 *    main input file led to fread() on a NULL stream.
 *  - malloc() results and all av_frame_alloc() results are now checked.
 *  - Early failure no longer leaks the frames/files already acquired
 *    (goto-based cleanup).
 *  - fread() results are compared against the full frame size so a
 *    truncated trailing frame is not processed as valid data.
 */
int mergeyuv(char* file, char* light, char* lightout, int width, int height)
{
    int ret = -1;
    /* YUV420P: full-res Y plane plus quarter-res U and V planes. */
    size_t length = (size_t)width * height * 3 / 2;

    AVFrame* readframe = av_frame_alloc();
    AVFrame* lightframe = av_frame_alloc();
    AVFrame* outframe = av_frame_alloc();
    FILE* readfile = fopen(file, "rb");
    FILE* lightfile = fopen(light, "rb");
    FILE* outfile = fopen(lightout, "wb+");
    uint8_t* readbuff = malloc(length);
    uint8_t* lightbuff = malloc(length);
    uint8_t* outbuff = malloc(length);

    if (readfile == NULL || lightfile == NULL || outfile == NULL ||
        readframe == NULL || lightframe == NULL || outframe == NULL ||
        readbuff == NULL || lightbuff == NULL || outbuff == NULL)
        goto cleanup;

    if (init_frame(readframe, width, height, readbuff) < 0 ||
        init_frame(lightframe, width, height, lightbuff) < 0 ||
        init_frame(outframe, width, height, outbuff) < 0)
        goto cleanup;

    while (fread(readbuff, 1, length, readfile) == length)
    {
        if (fread(lightbuff, 1, length, lightfile) != length)
            break; /* material stream exhausted */
        puts("merge one frame");
        /* In-place: dst and src are the same frame. */
        frame_cover_black(readframe, readframe, lightframe);
        write_yuvframe(readframe, outfile);
    }
    ret = 0;

cleanup:
    if (readfile) fclose(readfile);
    if (lightfile) fclose(lightfile);
    if (outfile) fclose(outfile);
    free(readbuff);
    free(lightbuff);
    free(outbuff);
    av_frame_free(&readframe);
    av_frame_free(&lightframe);
    av_frame_free(&outframe);
    return ret;
}
/*
 * Attaches the caller-owned buffer `dst_buff` to `frame` as YUV420P
 * picture data of the given dimensions (fills data[] and linesize[]).
 * Returns 0 on success, -1 on failure.  Does NOT take ownership of
 * `frame` or `dst_buff`.
 *
 * BUG FIXES vs. original:
 *  - avpicture_fill() returns the required buffer size (positive) on
 *    success and a negative AVERROR on failure; the original's
 *    `!avpicture_fill(...)` treated success as error and missed real
 *    failures.  Check `< 0` instead.
 *  - Returned NULL from an int-returning function; now returns -1.
 *  - No longer calls av_frame_free() on the caller's frame, which left
 *    the caller holding a dangling pointer and caused a double free in
 *    mergeyuv()'s cleanup path.
 *
 * NOTE(review): avpicture_fill() is deprecated in newer FFmpeg in favor
 * of av_image_fill_arrays(); kept here to match the file's API level.
 */
int init_frame(AVFrame* frame, int width, int height, uint8_t* dst_buff)
{
    if (avpicture_fill((AVPicture *) frame, dst_buff, AV_PIX_FMT_YUV420P,
                       width, height) < 0)
    {
        puts("init frame error");
        return -1;
    }
    frame->width = width;
    frame->height = height;
    frame->format = AV_PIX_FMT_YUV420P;
    return 0;
}
/*
 * Overlays the "black material" frame (cover_frame) onto src_frame,
 * writing the result into dst_frame.  In mergeyuv() dst_frame and
 * src_frame are the same frame, so the blend is in-place.
 *
 * Per pixel of the cover's luma (Y) plane:
 *   - Y <= 32  : treated as solid black material — output luma is forced
 *                to 16 (video-range black) and the cover's chroma is
 *                copied for the corresponding 2x2 block.
 *   - Y  > 32  : treated as (semi-)transparent — the source luma is
 *                scaled by Y/256, so brighter cover pixels let more of
 *                the source through; chroma is left untouched.
 *
 * NOTE(review): the accompanying text describes a third branch with a
 * 200 cutoff ("fully transparent above 200"); this implementation has no
 * such branch — at Y=255 the scale factor is 255/256, never exactly 1.
 * Confirm which behavior is intended.
 *
 * Returns 0 on success, -1 if any frame pointer is NULL.
 */
int frame_cover_black( AVFrame* dst_frame, AVFrame* src_frame, AVFrame* cover_frame)
{
if(dst_frame == NULL || src_frame == NULL || cover_frame == NULL)
{
puts("frame_cover_black input or output frame is NULL");
return -1;
}
/* Iterate over the cover's dimensions; assumes all three frames share
 * the same geometry — TODO confirm (mergeyuv allocates them equal). */
int w2 = cover_frame->width;
int h2 = cover_frame->height;
int i = 0, j = 0;
int a, a2;
float rat;
for(i = 0; i < h2; i++)
{
for(j = 0; j < w2; j++)
{
/* Cover luma at (i, j).  data[] bytes are uint8_t, so a2 is already
 * 0..255; the sign-fixup ternary below is defensive and never fires. */
a2 = cover_frame->data[0][i * cover_frame->linesize[0] + j];
a2 = a2 >= 0 ? a2 : 256 + a2;
if(a2 <= 32)
{
/* Solid black material: copy cover chroma for the 2x2 block this
 * luma sample belongs to (YUV420P chroma is subsampled 2x2). */
dst_frame->data[1][(j >> 1) + (int)(i >> 1) * (dst_frame->linesize[1])] = cover_frame->data[1][(j >> 1) + (int)(i >> 1) * (cover_frame->linesize[1])];
dst_frame->data[2][(j >> 1) + (int)(i >> 1) * (dst_frame->linesize[2])] = cover_frame->data[2][(j >> 1) + (int)(i >> 1) * (cover_frame->linesize[2])];
/* 16 = black in video-range (limited) luma. */
dst_frame->data[0][i * dst_frame->linesize[0] + j] = 16;
continue;
}
/* Semi-transparent: darken the source luma in proportion to the
 * cover's brightness (a2/256, clamped to 1). */
a = src_frame->data[0][i * src_frame->linesize[0] + j];
a = a >= 0 ? a : 256 + a;
rat = (double)a2 / 256;
rat = rat > 1 ? 1 : rat;
dst_frame->data[0][i * dst_frame->linesize[0] + j] = a * rat;
}
}
return 0;
}
/*
 * Writes one YUV420P frame to `out` as planar raw data: the full Y plane,
 * then the quarter-size U and V planes.  Each plane is written row by row
 * using its linesize (stride), so alignment padding is not emitted.
 * Returns 0 on success, -1 if either argument is NULL.
 *
 * BUG FIX: the original read pFrame->height / pFrame->width BEFORE the
 * pFrame == NULL check, so the guard could never prevent the NULL
 * dereference it was meant to catch.
 */
int write_yuvframe(AVFrame *pFrame, FILE *out)
{
    if (pFrame == NULL)
    {
        puts("error:write frame is null");
        return -1;
    }
    if (out == NULL)
    {
        puts("give write file is null");
        return -1;
    }
    int height = pFrame->height, width = pFrame->width;
    int j;
    for (j = 0; j < height; j++)
        fwrite(pFrame->data[0] + j * pFrame->linesize[0], 1, width, out);
    /* Chroma planes are half resolution in both dimensions (4:2:0). */
    for (j = 0; j < height / 2; j++)
        fwrite(pFrame->data[1] + j * pFrame->linesize[1], 1, width / 2, out);
    for (j = 0; j < height / 2; j++)
        fwrite(pFrame->data[2] + j * pFrame->linesize[2], 1, width / 2, out);
    return 0;
}
這是效果:

output
注:
選用16作拐點的話,會出現(xiàn)大量泛白區(qū)域,所以選用32作為拐點來分離出黑色區(qū)域。但是同樣會忽視某些細(xì)節(jié)。當(dāng)然,很可能這些細(xì)節(jié)是由于編碼的“有損”而產(chǎn)生的。使用200 作為全透明峰值。
y為素材視頻對應(yīng)點的Y值,d(包含uv)為輸出幀的數(shù)據(jù)
if y< 32
d設(shè)置為黑色
else if y< 200
d按比例趨近黑色
else
忽視素材疊加,取原幀對應(yīng)點數(shù)據(jù)
具體解釋參看視頻疊加算法-白色素材疊加
三 待改進
1 應(yīng)該將素材視頻生成的尺寸縮小,通過指定坐標(biāo)的方法融合,從而提升算法效率。
2 會忽略素材視頻中的細(xì)節(jié),最終視頻中有鋸齒。
3 對于半透明處,也就是算法中 d按比例趨近黑色處,該計算方法會使得輸出視頻透明略顯生硬,梯度并不明顯,該處計算方法待改進。