在iOS 8.0以后開放了對視頻的硬件編碼能力,要引入
@import VideoToolbox;
以下是介紹使用VTCompressionSession對接收視頻數(shù)據(jù)的壓縮編碼;
apple developer來源
以下是步驟:
1:創(chuàng)建一個(gè)compression session
VTCompressionSessionCreate(CFAllocatorRef _Nullable allocator,//分配器,如果使用NULL的話,就使用默認(rèn)的.
int32_t width,//視頻幀的象素寬
int32_t height,//視頻幀的象素高
CMVideoCodecType codecType,//編碼器的類型
CFDictionaryRef _Nullable encoderSpecification,//如果用指定的視頻編碼器,就要設(shè)置這個(gè).使用NULL就是videoToolbox自己選擇一個(gè).
CFDictionaryRef _Nullable sourceImageBufferAttributes,//元象素緩存,如果你不要videoToolbox給你創(chuàng)建,就傳NULL.使用非VTB分配的緩存,可以讓你有機(jī)會拷貝圖片數(shù)據(jù).
CFAllocatorRef _Nullable compressedDataAllocator,//壓縮數(shù)據(jù)分配器.傳NULL可以使用默認(rèn)的.
VTCompressionOutputCallback _Nullable outputCallback,//回調(diào),這個(gè)方法會在另一個(gè)線程上被異步的VTCompressionSessionEncodeFrame
調(diào)用.只有在你要使VTCompressionSessionEncodeFrameWithOutputHandler去編碼幀時(shí),才可以設(shè)置為NULL.
void * _Nullable outputCallbackRefCon,//回調(diào)方法所在的實(shí)例,回調(diào)方法是全局的可以設(shè)置為NULL
VTCompressionSessionRef _Nullable * _Nonnull compressionSessionOut)//用來接收新的compression session
示例:
OSStatus status = VTCompressionSessionCreate(NULL, width, height, kCMVideoCodecType_H264, NULL, NULL, NULL, didCompressH264, (__bridge void *)(self), &EncodingSession);
if (status != 0)
{
NSLog(@"Error by VTCompressionSessionCreate ");
return ;
}
//continue...
2:(可選)配置session的屬性(Compression Properties)
使用VTSessionSetProperty(_:_:_:)或VTSessionSetProperties(_:_:)
示例
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);//實(shí)時(shí)運(yùn)行
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_ProfileLevel, kVTProfileLevel_H264_Baseline_4_1);
SInt32 bitRate = width*height*50; //越高效果越屌 幀數(shù)據(jù)越大
CFNumberRef ref = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bitRate);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_AverageBitRate, ref);
CFRelease(ref);
int frameInterval = 10; //關(guān)鍵幀間隔 越低效果越屌 幀數(shù)據(jù)越大
CFNumberRef frameIntervalRef = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &frameInterval);
VTSessionSetProperty(EncodingSession, kVTCompressionPropertyKey_MaxKeyFrameInterval,frameIntervalRef);
CFRelease(frameIntervalRef);
還有其它很多,可以去屬性查看
3:編碼一幀
VTCompressionSessionEncodeFrame(VTCompressionSessionRef _Nonnull session,//已經(jīng)被定義的compress session
CVImageBufferRef _Nonnull imageBuffer,//包含了一幀要被壓縮的視頻幀,這個(gè)buffer必須要有值.
CMTime presentationTimeStamp,//這一幀要顯示的時(shí)間,這個(gè)會關(guān)聯(lián)到采樣緩存.每一個(gè)顯示時(shí)間都必須要大于前一次的時(shí)間.
CMTime duration,//這一幀要顯示的持續(xù)時(shí)間,會關(guān)聯(lián)到采樣緩存,如果你沒有持續(xù)時(shí)間,傳kCMTimeInvalid.
CFDictionaryRef _Nullable frameProperties,//幀屬性
void * _Nullable sourceFrameRefCon,//幀的引用值,這個(gè)會被傳給輸出回調(diào)方法.
VTEncodeInfoFlags * _Nullable infoFlagsOut)//接受編碼操作的信息,可以傳NULL
示例:
// Submits one captured video frame to the compression session for H.264 encoding.
// The encode itself is asynchronous: the result is delivered later via the
// session's output callback (didCompressH264).
// On an encode error the session is invalidated and released so later calls
// become no-ops.
// @param sampleBuffer A sample buffer holding one uncompressed video frame.
- (void) encode:(CMSampleBufferRef )sampleBuffer
{
// Encoding through a NULL session is invalid; bail out early.
// (Comparing the same CF handle against both nil and NULL is redundant.)
if (EncodingSession == NULL)
{
return;
}
dispatch_sync(aQueue, ^{
frameCount++;
CVImageBufferRef imageBuffer = (CVImageBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
// A sample buffer without a pixel buffer cannot be encoded.
if (imageBuffer == NULL)
{
return;
}
// Presentation timestamps must be strictly increasing; derive them from the
// running frame counter on a 1000 Hz timescale.
CMTime presentationTimeStamp = CMTimeMake(frameCount, 1000);
VTEncodeInfoFlags flags;
OSStatus statusCode = VTCompressionSessionEncodeFrame(EncodingSession,
imageBuffer,
presentationTimeStamp,
kCMTimeInvalid, // no per-frame duration available
NULL, NULL, &flags);
if (statusCode != noErr)
{
// Irrecoverable encode failure: tear the session down.
// NOTE: the original guard used `!= nil || != NULL`, which is always
// true; a single NULL check is the intended condition.
if (EncodingSession != NULL)
{
VTCompressionSessionInvalidate(EncodingSession);
CFRelease(EncodingSession);
EncodingSession = NULL;
}
return;
}
});
}
4.強(qiáng)制完成一些或全部未處理的視頻幀.
調(diào)用VTCompressionSessionCompleteFrames(_:_:)
VTCompressionSessionCompleteFrames(VTCompressionSessionRef _Nonnull session,//compression session
CMTime completeUntilPresentationTimeStamp) // 視頻幀關(guān)聯(lián)的時(shí)間
如果completeUntilPresentationTimeStamp是數(shù)字的話,包括當(dāng)前時(shí)間和之前時(shí)間的幀都會在方法返回前發(fā)出(處理完)?.
如果completeUntilPresentationTimeStamp不是數(shù)字的話,全部的未處理的幀都會在方法返回前發(fā)出(處理完).
示例
VTCompressionSessionCompleteFrames(EncodingSession, kCMTimeInvalid);
5.當(dāng)你要結(jié)束編碼時(shí).
調(diào)用VTCompressionSessionInvalidate(_:)使session無效,并用CFRelease去釋放內(nèi)存.
示例
VTCompressionSessionInvalidate(EncodingSession);
CFRelease(EncodingSession);
EncodingSession = NULL;
=============================================
在編碼完成時(shí)會在
typedef void (*VTCompressionOutputCallback)(
void * CM_NULLABLE outputCallbackRefCon,
void * CM_NULLABLE sourceFrameRefCon,
OSStatus status,
VTEncodeInfoFlags infoFlags,
CM_NULLABLE CMSampleBufferRef sampleBuffer );
里返回,詳見上面的第一條.
// VTCompressionSession output callback, invoked (on an encoder-owned thread)
// once per encoded frame. For keyframes it extracts the SPS/PPS parameter
// sets from the format description and hands them to the delegate; it then
// walks the AVCC-formatted block buffer (4-byte big-endian length prefix per
// NAL unit) and forwards each NAL unit's payload to the delegate.
void didCompressH264(void *outputCallbackRefCon, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags,
CMSampleBufferRef sampleBuffer )
{
if (status != noErr) return;
if (!CMSampleBufferDataIsReady(sampleBuffer))
{
NSLog(@"didCompressH264 data is not ready ");
return;
}
H264HwEncoderImpl* encoder = (__bridge H264HwEncoderImpl*)outputCallbackRefCon;

// A frame is a keyframe unless the kCMSampleAttachmentKey_NotSync attachment
// is present. Guard against a missing/empty attachments array (the original
// indexed element 0 unconditionally and passed an uncast const void* to
// CFDictionaryContainsKey).
bool keyframe = false;
CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true);
if (attachments != NULL && CFArrayGetCount(attachments) > 0)
{
CFDictionaryRef attachment = (CFDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
keyframe = !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
}

if (keyframe)
{
CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
size_t sparameterSetSize, sparameterSetCount;
const uint8_t *sparameterSet;
// Extract SPS (parameter set index 0).
OSStatus spsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format,
0,
&sparameterSet,
&sparameterSetSize,
&sparameterSetCount,
NULL );
if (spsStatus == noErr)
{
size_t pparameterSetSize, pparameterSetCount;
const uint8_t *pparameterSet;
// Extract PPS (parameter set index 1). Renamed from the original's
// shadowing `statusCode` for clarity.
OSStatus ppsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, NULL );
if (ppsStatus == noErr)
{
encoder->sps = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
encoder->pps = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
if (encoder->_delegate)
{
[encoder->_delegate gotSpsPps:encoder->sps pps:encoder->pps];
}
}
}
}
// Extract the encoded NAL units (IDR/slice data).
CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
if (dataBuffer == NULL) return;
size_t length, totalLength;
char *dataPointer;
OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
if (statusCodeRet == noErr) {
size_t bufferOffset = 0;
static const size_t AVCCHeaderLength = 4;
// `bufferOffset + AVCCHeaderLength < totalLength` avoids the size_t
// underflow the original `totalLength - AVCCHeaderLength` form hits
// when totalLength < 4.
while (bufferOffset + AVCCHeaderLength < totalLength)
{
uint32_t NALUnitLength = 0;
memcpy(&NALUnitLength, dataPointer + bufferOffset, AVCCHeaderLength);
NALUnitLength = CFSwapInt32BigToHost(NALUnitLength); // lengths are stored big-endian
// Stop on a truncated/corrupt NAL unit rather than reading past the buffer.
if (bufferOffset + AVCCHeaderLength + NALUnitLength > totalLength) break;
NSData* data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + AVCCHeaderLength) length:NALUnitLength];
// Nil-check the delegate, consistent with the SPS/PPS path above.
if (encoder->_delegate)
{
[encoder->_delegate gotEncodedData:data isKeyFrame:keyframe];
}
bufferOffset += AVCCHeaderLength + NALUnitLength;
}
}
}
分解:
用以下方法,可以把SPS.PPS提取出來.
OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format,
0,
&sparameterSet,
&sparameterSetSize,
&sparameterSetCount,
NULL );
用以下方法,可以把IDR提取出來,
CMBlockBufferRef dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
size_t length, totalLength;
char *dataPointer;//拿出來數(shù)據(jù)的格式是,頭四個(gè)字節(jié)是數(shù)據(jù)長度(但字節(jié)順序是反的),后面就是數(shù)據(jù)..可能是由多個(gè)這樣的格式組成.
OSStatus statusCodeRet = CMBlockBufferGetDataPointer(dataBuffer, 0, &length, &totalLength, &dataPointer);
if (statusCodeRet == noErr) {
size_t bufferOffset = 0;
static const int AVCCHeaderLength = 4;
while (bufferOffset < totalLength - AVCCHeaderLength)
{
uint32_t NALUnitLength = 0;
memcpy(&NALUnitLength, dataPointer + bufferOffset, AVCCHeaderLength);//取出頭4個(gè)字節(jié)存到NALUintLength,
NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);//把字節(jié)順序反轉(zhuǎn)一下,得到數(shù)據(jù)長度.
NSData* data = [[NSData alloc] initWithBytes:(dataPointer + bufferOffset + AVCCHeaderLength) length:NALUnitLength];
[encoder->_delegate gotEncodedData:data isKeyFrame:keyframe];
bufferOffset += AVCCHeaderLength + NALUnitLength;
}
}