FFmpeg學習之開發(fā)Mac播放器(八):使用AudioUnit播放AV_SAMPLE_FMT_FLTP格式數(shù)據(jù)

使用FFmpeg解碼的PCM音頻數(shù)據(jù)是以一定格式存放的,包含在codec_ctx->sample_fmt中。使用AudioUnit可以直接播放FFmpeg中AV_SAMPLE_FMT_S16、AV_SAMPLE_FMT_S16P、AV_SAMPLE_FMT_FLT和AV_SAMPLE_FMT_FLTP格式的PCM數(shù)據(jù)。

//通過AUGraph來創(chuàng)建AudioUnit
// Builds the playback AudioUnit through an AUGraph: create the graph, add the
// output node (HAL output on macOS), open it, fetch the unit, then configure
// the input stream format and the render callback before initializing.
// On iOS an AVAudioSession must be configured first; macOS does not need it.
- (OSStatus)setupAudioUnitWithStreamDescription:(AudioStreamBasicDescription)streamDescription {
    OSStatus result = NewAUGraph(&_graph);
    if (result != noErr) {
        NSLog(@"Can not create new graph");
        return result;
    }

    // Describe the system output unit. kAudioUnitSubType_HALOutput is
    // macOS-only; on iOS kAudioUnitSubType_RemoteIO would be used instead.
    AudioComponentDescription outputDescription;
    bzero(&outputDescription, sizeof(outputDescription));
    outputDescription.componentType = kAudioUnitType_Output;
    outputDescription.componentSubType = kAudioUnitSubType_HALOutput;
    outputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;

    result = AUGraphAddNode(_graph, &outputDescription, &_node);
    if (result != noErr) {
        NSLog(@"Can not add node");
        return result;
    }

    result = AUGraphOpen(_graph);
    if (result != noErr) {
        NSLog(@"Can not open graph");
        return result;
    }

    // Opening the graph instantiated the units; retrieve the one behind our node.
    result = AUGraphNodeInfo(_graph, _node, NULL, &_unit);
    if (result != noErr) {
        NSLog(@"Can not get node info");
        return result;
    }

    // Declare the PCM layout we will feed to input bus 0 of the output unit.
    result = AudioUnitSetProperty(_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamDescription, sizeof(streamDescription));
    if (result != noErr) {
        NSLog(@"Can not set stream format on unit input scope");
        return result;
    }

    // Install the pull-model callback that supplies PCM data on demand.
    AURenderCallbackStruct renderCallback;
    renderCallback.inputProc = &InputRenderCallback;
    renderCallback.inputProcRefCon = (__bridge void *)self;
    result = AudioUnitSetProperty(_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &renderCallback, sizeof(renderCallback));
    if (result != noErr) {
        NSLog(@"Fail to set render callback");
        return result;
    }

    result = AUGraphInitialize(_graph);
    if (result != noErr) {
        NSLog(@"Can not initialize graph");
    }
    return result;
}
下面是播放AV_SAMPLE_FMT_FLTP格式數(shù)據(jù)時需要設(shè)置的AudioStreamBasicDescription:
    // AudioStreamBasicDescription for AV_SAMPLE_FMT_FLTP (planar 32-bit float) PCM.
    AudioStreamBasicDescription streamDescription;
    bzero(&streamDescription, sizeof(streamDescription));
    streamDescription.mFormatID = kAudioFormatLinearPCM;
    /*
    Flag combinations matching each FFmpeg sample format:
    AV_SAMPLE_FMT_S16   kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
    AV_SAMPLE_FMT_S16P  kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved
    AV_SAMPLE_FMT_FLT   kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked
    AV_SAMPLE_FMT_FLTP  kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved
    */
    streamDescription.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
    // Use the decoder's actual sample rate rather than hard-coding 44100 Hz;
    // otherwise any stream with a different rate plays at the wrong speed/pitch.
    streamDescription.mSampleRate = codec_ctx->sample_rate;
    streamDescription.mChannelsPerFrame = codec_ctx->channels;
    streamDescription.mFramesPerPacket = 1;
    // Float samples are 4 bytes (32 bits); SignedInteger samples would be 2 bytes (16 bits).
    streamDescription.mBitsPerChannel = 32;
    // Non-interleaved (planar): each buffer carries one channel, so one frame is
    // one 4-byte sample. Interleaved data would be 4 bytes x channel count.
    streamDescription.mBytesPerFrame = 4;
    streamDescription.mBytesPerPacket = 4;
//解碼音頻數(shù)據(jù)寫入到文件中
// Decodes every audio packet of the input and writes the planar float PCM to
// file1 (plane 0) and file2 (plane 1), then rewinds both files so the render
// callback can read from the beginning.
// NOTE(review): writes data[0]/data[1] unconditionally — assumes the decoded
// stream has at least two channels; confirm against the opened input.
- (void)decodeAudioData {
    AVPacket packet;
    av_init_packet(&packet);
    // Allocate one reusable frame instead of leaking a fresh allocation on the
    // EAGAIN/EOF/error break paths of the inner loop.
    AVFrame * frame = av_frame_alloc();
    while ((av_read_frame(ifmt_ctx, &packet)) >= 0) {
        if (packet.stream_index == audio_stream_index) {
            int ret = avcodec_send_packet(codec_ctx, &packet);
            while (ret >= 0) {
                ret = avcodec_receive_frame(codec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                } else if (ret < 0) {
                    NSLog(@"Error during decoding");
                    break;
                }
                // Size of ONE plane (nb_channels = 1): planar formats keep each
                // channel in its own frame->data[i] buffer.
                int data_size = av_samples_get_buffer_size(frame->linesize, 1, frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);
                fwrite(frame->data[0], 1, data_size, file1);
                fwrite(frame->data[1], 1, data_size, file2);
                av_frame_unref(frame);
            }
        }
        // av_read_frame allocates a buffer for every packet; release it each
        // iteration or the whole compressed stream leaks.
        av_packet_unref(&packet);
    }
    av_frame_free(&frame);
    // Rewind so playback starts from the first sample.
    if (file1) {
        fseek(file1, 0, SEEK_SET);
    }
    if (file2) {
        fseek(file2, 0, SEEK_SET);
    }
}
//填充音頻數(shù)據(jù)的回調(diào)
// AudioUnit pull callback body: fills each channel buffer with PCM read from
// the per-channel files written by decodeAudioData. All buffers are zeroed
// first so a short read near end-of-file leaves silence instead of garbage.
- (OSStatus)renderData:(AudioBufferList *)ioData atTimeStamp:(const AudioTimeStamp *)timeStamp forElement:(UInt32)element numberFrames:(UInt32)numFrames flags:(AudioUnitRenderActionFlags *)flags {
    for (UInt32 iBuffer = 0; iBuffer < ioData->mNumberBuffers; iBuffer++) {
        memset(ioData->mBuffers[iBuffer].mData, 0, ioData->mBuffers[iBuffer].mDataByteSize);
    }
    FILE * files[] = {file1, file2};
    // Clamp to the two channel files we actually have — indexing files[] with
    // mNumberBuffers directly would read out of bounds for >2 buffers.
    UInt32 bufferCount = ioData->mNumberBuffers < 2 ? ioData->mNumberBuffers : 2;
    for (UInt32 iBuffer = 0; iBuffer < bufferCount; iBuffer++) {
        // Read up to mDataByteSize bytes; FFmpeg's decoded frame size generally
        // differs from the unit's request, which is why data is staged through
        // files here. A partial read is fine: the tail stays zeroed (silence).
        size_t bytesRead = fread(ioData->mBuffers[iBuffer].mData, 1, ioData->mBuffers[iBuffer].mDataByteSize, files[iBuffer]);
        (void)bytesRead;
    }
    return noErr;
}

// C trampoline registered via kAudioUnitProperty_SetRenderCallback: recovers
// the ViewController from the refCon pointer and forwards all callback
// arguments to its Objective-C handler.
static OSStatus InputRenderCallback(void * inRefCon, AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp * inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList * ioData) {
    ViewController * controller = (__bridge ViewController *)inRefCon;
    return [controller renderData:ioData
                      atTimeStamp:inTimeStamp
                       forElement:inBusNumber
                     numberFrames:inNumberFrames
                            flags:ioActionFlags];
}

我在Demo中把PCM數(shù)據(jù)用AVFilter分別轉(zhuǎn)成了AV_SAMPLE_FMT_S16、AV_SAMPLE_FMT_S16P、AV_SAMPLE_FMT_FLT和AV_SAMPLE_FMT_FLTP用AudioUnit進行播放
github

?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
【社區(qū)內(nèi)容提示】社區(qū)部分內(nèi)容疑似由AI輔助生成,瀏覽時請結(jié)合常識與多方信息審慎甄別。
平臺聲明:文章內(nèi)容(如有圖片或視頻亦包括在內(nèi))由作者上傳并發(fā)布,文章內(nèi)容僅代表作者本人觀點,簡書系信息發(fā)布平臺,僅提供信息存儲服務(wù)。

相關(guān)閱讀更多精彩內(nèi)容

友情鏈接更多精彩內(nèi)容