ijkplayer is built on FFmpeg's ffplay. This article walks through ijkplayer's basic implementation alongside the source code; if you are not familiar with FFmpeg, it is worth getting to know it first.
The player's basic flow is as follows:

Player creation
IJKMediaPlayer.initPlayer
ijkplayer_jni.IjkMediaPlayer_native_setup
ijkplayer_android.ijkmp_android_create
ijkplayer.ijkmp_create
ff_ffplay.ffp_create
ijkmeta.ijkmeta_create
The above is the player-creation flow; essentially it creates an FFPlayer at the C layer.
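Condensed, the chain above boils down to roughly the following (a simplified sketch of ijkplayer_android.c / ijkplayer.c; locking, reference counting and error handling are omitted, so treat the exact calls as approximate):

// simplified sketch of player creation: ijkmp_create() allocates the IjkMediaPlayer
// and calls ffp_create(); the Android variant then plugs in the Android vout/pipeline
IjkMediaPlayer *ijkmp_android_create(int (*msg_loop)(void *))
{
    IjkMediaPlayer *mp = ijkmp_create(msg_loop);   // mp->ffplayer = ffp_create()
    if (!mp)
        return NULL;

    mp->ffplayer->vout = SDL_VoutAndroid_CreateForAndroidSurface();
    mp->ffplayer->pipeline = ffpipeline_create_from_android(mp->ffplayer);
    ffpipeline_set_vout(mp->ffplayer->pipeline, mp->ffplayer->vout);
    return mp;
}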
Opening the stream and demuxing
IjkMediaPlayer.prepareAsync
ijkplayer_jni._prepareAsync
ijkplayer.ijkmp_prepare_async
ijkplayer.ijkmp_prepare_async_l
ff_ffplay.ffp_prepare_async_l
ff_ffplay.stream_open
ff_ffplay.read_thread
The key parts of this flow are stream_open and read_thread: stream_open performs the initialization, while read_thread opens the stream, demuxes it, and creates the three decoder threads.
stream_open
static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat)
{
    assert(!ffp->is);
    VideoState *is;
    // allocate the VideoState
    is = av_mallocz(sizeof(VideoState));
    ...
#if defined(__ANDROID__)
    // create the soundtouch handle, used for changing speed and pitch
    if (ffp->soundtouch_enable) {
        is->handle = ijk_soundtouch_create();
    }
#endif
    /* start video display */
    // create the frame queues that hold decoded frames
    if (frame_queue_init(&is->pictq, &is->videoq, ffp->pictq_size, 1) < 0)
        goto fail;
    if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
        goto fail;
    if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
        goto fail;
    // create the packet queues that hold demuxed packets
    if (packet_queue_init(&is->videoq) < 0 ||
        packet_queue_init(&is->audioq) < 0 ||
        packet_queue_init(&is->subtitleq) < 0)
        goto fail;
    ...
    // initialize the clocks used for A/V synchronization
    init_clock(&is->vidclk, &is->videoq.serial);
    init_clock(&is->audclk, &is->audioq.serial);
    init_clock(&is->extclk, &is->extclk.serial);
    ...
    // create the video refresh (render) thread
    is->video_refresh_tid = SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");
    if (!is->video_refresh_tid) {
        av_freep(&ffp->is);
        return NULL;
    }
    // create the read thread; demuxing and the three decoder threads are started inside it
    is->initialized_decoder = 0;
    is->read_tid = SDL_CreateThreadEx(&is->_read_tid, read_thread, ffp, "ff_read");
    ...
    return is;
fail:
    is->initialized_decoder = 1;
    is->abort_request = true;
    if (is->video_refresh_tid)
        SDL_WaitThread(is->video_refresh_tid, NULL);
    stream_close(ffp);
    return NULL;
}
read_thread
static int read_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFormatContext *ic = NULL;
    int err, i, ret __unused;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int64_t stream_start_time;
    int completed = 0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    SDL_mutex *wait_mutex = SDL_CreateMutex();
    int scan_all_pmts_set = 0;
    int64_t pkt_ts;
    int last_error = 0;
    int64_t prev_io_tick_counter = 0;
    int64_t io_tick_counter = 0;
    int init_ijkmeta = 0;
    ...
    // allocate the AVFormatContext, the first step of playback with FFmpeg
    ic = avformat_alloc_context();
    ...
    // open the input stream
    err = avformat_open_input(&ic, is->filename, is->iformat, &ffp->format_opts);
    ...
    if (!ffp->video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!ffp->audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                st_index[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!ffp->video_disable && !ffp->subtitle_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                st_index[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    is->show_mode = ffp->show_mode;
#ifdef FFP_MERGE // bbc: dunno if we need this
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
        AVCodecParameters *codecpar = st->codecpar;
        AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
        if (codecpar->width)
            set_default_window_size(codecpar->width, codecpar->height, sar);
    }
#endif
    /* open the streams */
    // stream_component_open creates the decoder and decoder thread for each stream type (analyzed below)
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
    } else {
        ffp->av_sync_type = AV_SYNC_VIDEO_MASTER;
        is->av_sync_type  = ffp->av_sync_type;
    }
    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret = stream_component_open(ffp, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    if (is->show_mode == SHOW_MODE_NONE)
        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(ffp, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }
    ...
    // main loop: keep reading packets from the stream
    for (;;) {
        ...
        // read one packet
        ret = av_read_frame(ic, pkt);
        ...
        // on a discontinuity, flush each queue; packets are then put into the queue matching their stream type
        if (pkt->flags & AV_PKT_FLAG_DISCONTINUITY) {
            if (is->audio_stream >= 0) {
                packet_queue_put(&is->audioq, &flush_pkt);
            }
            if (is->subtitle_stream >= 0) {
                packet_queue_put(&is->subtitleq, &flush_pkt);
            }
            if (is->video_stream >= 0) {
                packet_queue_put(&is->videoq, &flush_pkt);
            }
        }
        ...
    }
    ret = 0;
    ...
    return 0;
}
stream_component_open
static int stream_component_open(FFPlayer *ffp, int stream_index)
{
    VideoState *is = ffp->is;
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec = NULL;
    const char *forced_codec_name = NULL;
    AVDictionary *opts = NULL;
    AVDictionaryEntry *t = NULL;
    int sample_rate, nb_channels;
    int64_t channel_layout;
    int ret = 0;
    int stream_lowres = ffp->lowres;
    ...
    // find the decoder
    codec = avcodec_find_decoder(avctx->codec_id);
    ...
    // create the output device and the decoder thread according to the stream type
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ...
        /* prepare audio output */
        if ((ret = audio_open(ffp, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
            goto fail;
        ...
        decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
        if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
            is->auddec.start_pts = is->audio_st->start_time;
            is->auddec.start_pts_tb = is->audio_st->time_base;
        }
        // start the audio decoder thread
        if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
            goto out;
        SDL_AoutPauseAudio(ffp->aout, 0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ...
        // set up the video decoder pipeline node
        if (ffp->async_init_decoder) {
            while (!is->initialized_decoder) {
                SDL_Delay(5);
            }
            if (ffp->node_vdec) {
                is->viddec.avctx = avctx;
                ret = ffpipeline_config_video_decoder(ffp->pipeline, ffp);
            }
            if (ret || !ffp->node_vdec) {
                decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
                ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
                if (!ffp->node_vdec)
                    goto fail;
            }
        } else {
            decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
            ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
            if (!ffp->node_vdec)
                goto fail;
        }
        // start the video decoder thread
        if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
            goto out;
        ...
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        ...
        break;
    default:
        break;
    }
    goto out;
    ...
    return ret;
}
Summary
- stream_open initializes parameters and the frame/packet queues, then starts the video-refresh and read threads
- read_thread opens the input and demuxes it, puts each demuxed AVPacket into the matching queue, and calls stream_component_open for each stream it finds
- stream_component_open creates the appropriate decoder for each stream type and starts the corresponding decoder thread
Audio decoding & rendering
Creating the audio output
On Android, audio can be output with either AudioTrack or OpenSL ES; which one is created depends on the player's options. The flow is:
ijkplayer.ijkmp_prepare_async_l
ff_ffplay.ffp_prepare_async_l
ff_pipeline.ffpipeline_open_audio_output
ffpipeline_android.func_open_audio_output
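The choice itself is made in func_open_audio_output; roughly (simplified from ffpipeline_android.c, with volume handling and error checks omitted) it does the following:

static SDL_Aout *func_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    SDL_Aout *aout = NULL;
    if (ffp->opensles) {
        // OpenSL ES output
        aout = SDL_AoutAndroid_CreateForOpenSLES();
    } else {
        // AudioTrack output (the default)
        aout = SDL_AoutAndroid_CreateForAudioTrack();
    }
    return aout;
}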
In the audio decode/render path the decoder and the renderer form a producer-consumer pair: the decoder decodes packets and pushes the results into the Frame queue, and the renderer takes the decoded frames out and outputs them.
Decoding
static int audio_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    Frame *af;
    ...
    // decode in a loop
    do {
        ffp_audio_statistic_l(ffp);
        if ((got_frame = decoder_decode_frame(ffp, &is->auddec, frame, NULL)) < 0)
            goto the_end;
        if (got_frame) {
            tb = (AVRational){1, frame->sample_rate};
            // seek and avfilter handling omitted here
            // take a writable Frame from the FrameQueue
            if (!(af = frame_queue_peek_writable(&is->sampq)))
                goto the_end;
            af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            af->pos = frame->pkt_pos;
            af->serial = is->auddec.pkt_serial;
            af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
            av_frame_move_ref(af->frame, frame);
            // push the Frame into the queue
            frame_queue_push(&is->sampq);
        }
        ...
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
    // free the frame
    av_frame_free(&frame);
    return ret;
}
audio_thread decodes each AVPacket into an AVFrame, wraps it in a Frame, and pushes it into the queue for the consumer.
Audio output (OpenSL ES as an example)
Output call chain
ff_ffplay.stream_component_open
ff_ffplay.audio_open
ijksdl_aout.SDL_AoutOpenAudio
ijksdl_aout_android_opensles.aout_open_audio
ijksdl_aout_android_opensles.aout_thread
ijksdl_aout_android_opensles.aout_thread_n
ff_ffplay.sdl_audio_callback
ff_ffplay.audio_decode_frame
aout_open_audio performs the OpenSL ES initialization, and aout_thread_n runs a loop waiting for OpenSL ES callbacks; on each callback a Frame is taken from the Frame queue and handed to OpenSL ES for output.
If variable-speed playback is enabled, ff_ffplay.audio_decode_frame additionally runs the samples through soundtouch to adjust the audio duration.
The full code is not listed here one by one.
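That said, a condensed sketch of what sdl_audio_callback does each time the audio device asks for more data helps tie the pieces together (simplified; error handling, volume mixing and statistics are omitted):

static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    FFPlayer *ffp = opaque;
    VideoState *is = ffp->is;
    int audio_size, len1;

    // keep filling the buffer the audio device handed us
    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            // staging buffer is empty: pull the next Frame from sampq, resample it
            // (and run it through soundtouch when the playback rate is not 1.0)
            audio_size = audio_decode_frame(ffp);
            if (audio_size < 0) {
                is->audio_buf = NULL;   // on error, output silence below
                is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
            } else {
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        if (is->audio_buf)
            memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        else
            memset(stream, 0, len1);
        len    -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    // the audio clock (audclk) is also updated here -- see the A/V sync section below
}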
Video decoding & rendering
Video can be decoded in two ways: hardware or software. ijkplayer picks the decoder according to its configuration.
ffpipeline_android.c
static IJKFF_Pipenode *func_open_video_decoder(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
{
    IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
    IJKFF_Pipenode *node = NULL;

    if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2)
        node = ffpipenode_create_video_decoder_from_android_mediacodec(ffp, pipeline, opaque->weak_vout);
    if (!node) {
        node = ffpipenode_create_video_decoder_from_ffplay(ffp);
    }
    return node;
}
Video decoding
Which function does the work depends on the decoding mode: software decoding uses ffplay_video_thread in ff_ffplay, while hardware decoding uses func_run_sync in ffpipenode_android_mediacodec_vdec.
Software decoding
static int ffplay_video_thread(void *arg)
{
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    ...
    // decode in a loop
    for (;;) {
        // decode one frame
        ret = get_video_frame(ffp, frame);
        // queue the picture
        ret = queue_picture(ffp, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
        av_frame_unref(frame);
    }
    ...
    return 0;
}

static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
{
    VideoState *is = ffp->is;
    int got_picture;

    // decode
    if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
        return -1;
    ...
    return got_picture;
}
static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int ret = AVERROR(EAGAIN);

    // keep pulling frames as long as there is no fatal error
    for (;;) {
        AVPacket pkt;

        if (d->queue->serial == d->pkt_serial) {
            do {
                switch (d->avctx->codec_type) {
                case AVMEDIA_TYPE_VIDEO:
                    ret = avcodec_receive_frame(d->avctx, frame);
                    ...
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    ret = avcodec_receive_frame(d->avctx, frame);
                    ...
                    break;
                default:
                    break;
                }
                ...
            } while (ret != AVERROR(EAGAIN));
        }
        ...
        if (pkt.data == flush_pkt.data) {
            ...
        } else {
            if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                ...
            } else {
                // feed one packet to the decoder
                if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
                    av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
                    d->packet_pending = 1;
                    av_packet_move_ref(&d->pkt, &pkt);
                }
            }
            av_packet_unref(&pkt);
        }
    }
}
decoder_decode_frame outputs Frames of the type matching the packets it is fed. Because of I, B, and P frames, feeding a packet may produce no output at all, or a single packet may produce several frames, so the function loops to keep pulling data.
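Stripped of ijkplayer's queue and serial handling, the underlying FFmpeg pattern is "send one packet, then drain every frame it produced". A minimal sketch (dec_ctx, pkt and frame are assumed to be allocated and opened elsewhere):

#include <libavcodec/avcodec.h>

// feed one packet and drain all frames the decoder can currently produce
static int decode_one_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;                                  // decoder could not take the packet

    for (;;) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                                // needs more input, or fully drained
        if (ret < 0)
            return ret;                              // real decoding error
        // ... use the frame here (e.g. push it into the FrameQueue) ...
        av_frame_unref(frame);
    }
}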
Hardware decoding
The main MediaCodec flow is:
- MediaCodec.queueInputBuffer feeds compressed data to MediaCodec
- MediaCodec.dequeueOutputBuffer fetches decoded data from MediaCodec
- the decoded Frame is stored in the FrameQueue
The concrete MediaCodec code and usage will be analyzed another time; the sketch below only illustrates the feed/drain loop.
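ijkplayer drives MediaCodec behind its own SDL_AMediaCodec wrapper, so the following is purely illustrative: it expresses the same feed/drain loop with the NDK AMediaCodec API, and fill_input() / handle_output() are hypothetical placeholders for "copy one packet in" and "wrap the output as a Frame":

#include <media/NdkMediaCodec.h>

// hypothetical helpers assumed to be provided by the caller (not ijkplayer functions)
void fill_input(uint8_t *buf, size_t cap, size_t *size, int64_t *pts);
void handle_output(AMediaCodec *codec, ssize_t idx, AMediaCodecBufferInfo *info);

static void mediacodec_decode_loop(AMediaCodec *codec)   // codec already configured and started
{
    for (;;) {
        // 1. feed: grab a free input buffer and queue one compressed packet
        ssize_t in_idx = AMediaCodec_dequeueInputBuffer(codec, 10000 /* us */);
        if (in_idx >= 0) {
            size_t cap = 0, size = 0;
            int64_t pts = 0;
            uint8_t *buf = AMediaCodec_getInputBuffer(codec, in_idx, &cap);
            fill_input(buf, cap, &size, &pts);
            AMediaCodec_queueInputBuffer(codec, in_idx, 0, size, pts, 0);
        }

        // 2. drain: pull a decoded buffer, hand it to the FrameQueue/renderer, release it
        AMediaCodecBufferInfo info;
        ssize_t out_idx = AMediaCodec_dequeueOutputBuffer(codec, &info, 10000 /* us */);
        if (out_idx >= 0) {
            handle_output(codec, out_idx, &info);
            AMediaCodec_releaseOutputBuffer(codec, out_idx, true /* render to the surface */);
        }
    }
}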
A/V synchronization & playback rate
A/V synchronization
A/V sync normally uses audio as the master clock with video following it, and that is the only case analyzed here; using the video clock or an external clock as master is left for another time.
typedef struct VideoState {
    ...
    Clock audclk;
    Clock vidclk;
    Clock extclk;
    ...
} VideoState;

typedef struct Clock {
    double pts;            /* clock base */
    double pts_drift;      /* clock base minus time at which we updated the clock */
    double last_updated;
    double speed;
    int serial;            /* clock is based on a packet with this serial */
    int paused;
    int *queue_serial;     /* pointer to the current packet queue serial, used for obsolete clock detection */
} Clock;
VideoState defines three Clocks, which are initialized in stream_open. During audio playback, audclk is updated in sdl_audio_callback.
ff_ffplay.c
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    ...
    if (!isnan(is->audio_clock)) {
        set_clock_at(&is->audclk, is->audio_clock - (double)(is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec - SDL_AoutGetLatencySeconds(ffp->aout), is->audio_clock_serial, ffp->audio_callback_time / 1000000.0);
        sync_clock_to_slave(&is->extclk, &is->audclk);
    }
    ...
}
ff_ffplay.c
static void set_clock_at(Clock *c, double pts, int serial, double time)
{
    c->pts = pts;
    c->last_updated = time;
    c->pts_drift = c->pts - time;
    c->serial = serial;
}
Here last_updated is the current relative time, and pts_drift is the difference between the pts and that relative time.
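The counterpart get_clock reconstructs the current clock value from pts_drift; in simplified form it is essentially:

static double get_clock(Clock *c)
{
    if (*c->queue_serial != c->serial)
        return NAN;
    if (c->paused) {
        return c->pts;
    } else {
        double time = av_gettime_relative() / 1000000.0;
        // pts_drift + time restores the pts as of "now"; the speed term makes the clock
        // advance faster or slower than wall-clock time
        return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
    }
}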
video_refresh uses the current frame's duration together with the video clock and the audio clock to compute remaining_time, which then controls when the next frame starts displaying.
static void video_refresh(FFPlayer *opaque, double *remaining_time)
{
    FFPlayer *ffp = opaque;
    VideoState *is = ffp->is;
    double time;

    if (is->video_st) {
retry:
        if (frame_queue_nb_remaining(&is->pictq) == 0) {
            // nothing to do, no picture to display in the queue
        } else {
            double last_duration, duration, delay;
            Frame *vp, *lastvp;

            /* dequeue the picture */
            lastvp = frame_queue_peek_last(&is->pictq);
            vp = frame_queue_peek(&is->pictq);

            if (vp->serial != is->videoq.serial) {
                frame_queue_next(&is->pictq);
                goto retry;
            }

            if (lastvp->serial != vp->serial)
                is->frame_timer = av_gettime_relative() / 1000000.0;

            if (is->paused)
                goto display;

            /* compute nominal last_duration */
            last_duration = vp_duration(is, lastvp, vp);
            delay = compute_target_delay(ffp, last_duration, is);

            time = av_gettime_relative() / 1000000.0;
            if (isnan(is->frame_timer) || time < is->frame_timer)
                is->frame_timer = time;
            // not yet time for the next frame: report how long to wait and keep showing the current one
            if (time < is->frame_timer + delay) {
                *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
                goto display;
            }

            is->frame_timer += delay;
            if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
                is->frame_timer = time;

            SDL_LockMutex(is->pictq.mutex);
            if (!isnan(vp->pts))
                update_video_pts(is, vp->pts, vp->pos, vp->serial);
            SDL_UnlockMutex(is->pictq.mutex);

            // frame dropping
            if (frame_queue_nb_remaining(&is->pictq) > 1) {
                Frame *nextvp = frame_queue_peek_next(&is->pictq);
                duration = vp_duration(is, vp, nextvp);
                // time > is->frame_timer + duration: the last refresh time plus this frame's duration
                // is already in the past, so the video is running late; drop the frame and retry the next one
                if (!is->step && (ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER))
                    && time > is->frame_timer + duration) {
                    frame_queue_next(&is->pictq);
                    goto retry;
                }
            }
            ...
        }
display:
        /* display picture */
        if (!ffp->display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
            video_display2(ffp);
    }
    ...
}
static double compute_target_delay(FFPlayer *ffp, double delay, VideoState *is)
{
    double sync_threshold, diff = 0;

    /* update delay to follow master synchronisation source */
    if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_clock(&is->vidclk) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
        /* -- by bbcallen: replace is->max_frame_duration with AV_NOSYNC_THRESHOLD */
        if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
            // diff < 0 means the video clock is behind the audio clock: shorten this frame's
            // display time to catch up, but never let delay drop below 0
            if (diff <= -sync_threshold)
                delay = FFMAX(0, delay + diff);
            // delay is how long the current frame should stay on screen; AV_SYNC_FRAMEDUP_THRESHOLD is a threshold:
            // if the frame already stays long enough, extend it by diff instead of doubling it
            else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
                delay = delay + diff;
            // diff above the threshold means video is ahead of audio: double the display time
            // of the current frame so that audio can catch up
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    ...
    return delay;
}
Summary
- A/V sync uses audio as the master clock
- video controls each frame's display time through remaining_time
- remaining_time is computed as frame_timer + delay - time, where frame_timer is when the previous frame started displaying and delay combines the current frame's nominal duration with the gap between the video clock and the master clock (a worked example follows)
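For example, if the previous frame started displaying at frame_timer = 100.000 s, compute_target_delay returns delay = 0.040 s, and the current relative time is time = 100.010 s, then remaining_time = 100.000 + 0.040 - 100.010 = 0.030 s, so the refresh loop sleeps roughly 30 ms before presenting the next frame.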
Playback rate
In the A/V sync path, video only controls each frame's display time through remaining_time; the speed and playback_rate parameters are never used on the video side. So how does variable-speed playback work?
The answer is that video follows audio. When audio plays faster, video has to keep up, which can mean skipping frames to speed video up. Slowing down works the same way: when audio slows down, video runs ahead of audio, and playback is slowed by increasing delay.
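For example, at 2x speed the audio clock advances roughly two seconds of media time per wall-clock second, so diff = get_clock(&is->vidclk) - get_master_clock(is) in compute_target_delay quickly turns negative: delay is clamped toward 0, and once time passes frame_timer + duration the frame-dropping branch in video_refresh discards frames until video catches up. At 0.5x speed the opposite happens: diff turns positive and delay is extended (doubled, or increased by diff), so each frame stays on screen longer.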