From: Rick Kern <ker...@gmail.com>

Moved all declarations to the top of their block.
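As an illustration (a sketch with made-up names, not code taken from vtenc.c),
the pattern applied throughout the file is the following, presumably for
compatibility with compilers that reject declarations mixed with statements:

    #include <pthread.h>

    struct queue { pthread_mutex_t lock; int head; };

    /* Before: a declaration follows a statement inside the block. */
    static int peek_mixed(struct queue *q)
    {
        pthread_mutex_lock(&q->lock);
        int value = q->head;            /* declaration after a statement */
        pthread_mutex_unlock(&q->lock);
        return value;
    }

    /* After: declarations first, statements afterwards, matching the
     * rewrite this patch applies to vtenc.c. */
    static int peek_top(struct queue *q)
    {
        int value;

        pthread_mutex_lock(&q->lock);
        value = q->head;
        pthread_mutex_unlock(&q->lock);
        return value;
    }
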
Signed-off-by: Rick Kern <ker...@gmail.com>
---
 libavcodec/vtenc.c | 409 ++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 260 insertions(+), 149 deletions(-)

diff --git a/libavcodec/vtenc.c b/libavcodec/vtenc.c
index 78a918b..a8cc340 100644
--- a/libavcodec/vtenc.c
+++ b/libavcodec/vtenc.c
@@ -67,17 +67,18 @@ typedef struct VTEncContext{
 } VTEncContext;
 
 static void set_async_error(VTEncContext* vtctx, int err){
+    BufNode* info;
+
     pthread_mutex_lock(&vtctx->lock);
 
     vtctx->async_error = err;
 
-    BufNode* info = vtctx->q_head;
+    info = vtctx->q_head;
     vtctx->q_head = vtctx->q_tail = NULL;
 
     while(info){
-        CFRelease(info->cm_buffer);
-        BufNode* next = info->next;
+        BufNode* next = info->next;
+        CFRelease(info->cm_buffer);
         free(info);
         info = next;
     }
@@ -86,6 +87,8 @@ static void set_async_error(VTEncContext* vtctx, int err){
 }
 
 static int vtenc_q_pop(VTEncContext* vtctx, bool wait, CMSampleBufferRef* buf){
+    BufNode* info;
+
     pthread_mutex_lock(&vtctx->lock);
 
     if(vtctx->async_error){
@@ -110,7 +113,7 @@ static int vtenc_q_pop(VTEncContext* vtctx, bool wait, CMSampleBufferRef* buf){
         return 0;
     }
 
-    BufNode* info = vtctx->q_head;
+    info = vtctx->q_head;
     vtctx->q_head = vtctx->q_head->next;
     if(!vtctx->q_head){
         vtctx->q_tail = NULL;
@@ -141,12 +144,12 @@ static void vtenc_q_push(VTEncContext* vtctx, CMSampleBufferRef buffer){
     pthread_cond_signal(&vtctx->cv_sample_sent);
 
     if(!vtctx->q_head){
-        vtctx->q_head = vtctx->q_tail = info;
-        pthread_mutex_unlock(&vtctx->lock);
-        return;
+        vtctx->q_head = info;
+    }
+    else{
+        vtctx->q_tail->next = info;
     }
 
-    vtctx->q_tail->next = info;
     vtctx->q_tail = info;
 
     pthread_mutex_unlock(&vtctx->lock);
@@ -172,22 +175,21 @@ static void vtenc_free_block(void* opaque, uint8_t* data){
  * In all cases, *dst_size is set to the number of bytes used starting
  * at *dst.
  */
-
-static int get_params_info(
+static int get_params_size(
     AVCodecContext* avctx,
     CMVideoFormatDescriptionRef vid_fmt,
     size_t* size)
 {
     size_t total_size = 0;
     size_t ps_count;
-
+    size_t i;
     OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, 0, NULL, NULL, &ps_count, NULL);
     if(status){
         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set count: %d\n", status);
         return AVERROR_EXTERNAL;
     }
 
-    for(size_t i = 0; i < ps_count; i++){
+    for(i = 0; i < ps_count; i++){
         const uint8_t* ps;
         size_t ps_size;
         status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, i, &ps, &ps_size, NULL, NULL);
@@ -211,22 +213,26 @@ static int copy_param_sets(
 {
     size_t ps_count;
     OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, 0, NULL, NULL, &ps_count, NULL);
+    size_t offset = 0;
+    size_t i;
+
     if(status){
         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set count for copying: %d\n", status);
         return AVERROR_EXTERNAL;
     }
 
-    size_t offset = 0;
-    for(size_t i = 0; i < ps_count; i++){
+    for(i = 0; i < ps_count; i++){
         const uint8_t* ps;
         size_t ps_size;
+        size_t next_offset;
+
         status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, i, &ps, &ps_size, NULL, NULL);
         if(status){
             av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data for index %zd: %d\n", i, status);
             return AVERROR_EXTERNAL;
         }
 
-        size_t next_offset = offset + sizeof(start_code) + ps_size;
+        next_offset = offset + sizeof(start_code) + ps_size;
         if(dst_size < next_offset){
             av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
             return AVERROR_BUFFER_TOO_SMALL;
@@ -244,15 +250,16 @@ static int copy_param_sets(
 
 static int set_extradata(AVCodecContext* avctx, CMSampleBufferRef sample_buffer){
     CMVideoFormatDescriptionRef vid_fmt;
+    size_t total_size;
+    int status;
+
     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
     if(!vid_fmt){
         av_log(avctx, AV_LOG_ERROR, "No video format.\n");
         return AVERROR_EXTERNAL;
     }
 
-    size_t total_size;
-    int status;
-    status = get_params_info(avctx, vid_fmt, &total_size);
+    status = get_params_size(avctx, vid_fmt, &total_size);
     if(status){
         av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
         return status;
@@ -280,9 +287,8 @@ static av_cold void vtenc_output_callback(
     VTEncodeInfoFlags flags,
     CM_NULLABLE CMSampleBufferRef sample_buffer)
 {
-    av_assert0(ctx);
     AVCodecContext* avctx = (AVCodecContext*)ctx;
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
+    VTEncContext* vtctx = avctx->priv_data;
 
     if(vtctx->async_error){
         if(sample_buffer) CFRelease(sample_buffer);
@@ -312,14 +318,16 @@ static int get_length_code_size(
     size_t* size)
 {
     CMVideoFormatDescriptionRef vid_fmt;
+    int isize;
+    OSStatus status;
+
     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
     if(!vid_fmt){
         av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
         return AVERROR_EXTERNAL;
     }
 
-    int isize;
-    OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, 0, NULL, NULL, NULL, &isize);
+    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt, 0, NULL, NULL, NULL, &isize);
     if(status){
         av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
         return AVERROR_EXTERNAL;
@@ -329,8 +337,46 @@ static int get_length_code_size(
     return 0;
 }
 
+typedef enum VTEncLevel{
+    kLevel_Auto,
+    kLevel_1_3,
+    kLevel_3_0,
+    kLevel_3_1,
+    kLevel_3_2,
+    kLevel_4_0,
+    kLevel_4_1,
+    kLevel_4_2,
+    kLevel_5_0,
+    kLevel_5_1,
+    kLevel_5_2,
+} VTEncLevel;
+
+typedef struct VTEncValuePair{
+    const char *const str;
+    const VTEncLevel value;
+} VTEncValuePair;
+
+// Missing levels aren't supported by VideoToolbox.
+static const VTEncValuePair vtenc_h264_level_pairs[] = {
+    { "auto", kLevel_Auto },
+    { "1.3" , kLevel_1_3  },
+    { "3"   , kLevel_3_0  },
+    { "3.0" , kLevel_3_0  },
+    { "3.1" , kLevel_3_1  },
+    { "3.2" , kLevel_3_2  },
+    { "4"   , kLevel_4_0  },
+    { "4.0" , kLevel_4_0  },
+    { "4.1" , kLevel_4_1  },
+    { "4.2" , kLevel_4_2  },
+    { "5"   , kLevel_5_0  },
+    { "5.0" , kLevel_5_0  },
+    { "5.1" , kLevel_5_1  },
+    { "5.2" , kLevel_5_2  },
+    { NULL }
+};
+
 static bool get_h264_profile(AVCodecContext* avctx, int* profile_num){
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
+    VTEncContext* vtctx = avctx->priv_data;
     const char* profile = vtctx->profile;
 
     if(!profile){
@@ -353,80 +399,69 @@ static bool get_h264_profile(AVCodecContext* avctx, int* profile_num){
     return true;
 }
 
-static bool get_h264_level(AVCodecContext* avctx, int* level_num){
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
+static bool get_h264_level(AVCodecContext* avctx, VTEncLevel* level_num){
+    VTEncContext* vtctx = avctx->priv_data;
+    int i;
 
     if(!vtctx->level){
-        *level_num = FF_LEVEL_UNKNOWN;
+        *level_num = kLevel_Auto;
         return true;
     }
 
-    char* end;
-    unsigned long major = strtoul(vtctx->level, &end, 10);
-    unsigned long minor = 0;
-
-    if(!major || (*end && *end != '.')){
-        av_log(avctx, AV_LOG_ERROR, "Error parsing level '%s'\n", vtctx->level);
-        return false;
-    }
-
-    if(*end){
-        minor = strtoul(end + 1, &end, 10);
-    }
-
-    if(*end){
-        av_log(avctx, AV_LOG_ERROR, "Error parsing level '%s'\n", vtctx->level);
-        return false;
+    for(i = 0; vtenc_h264_level_pairs[i].str; i++){
+        const VTEncValuePair* pair = &vtenc_h264_level_pairs[i];
+        if(!strcmp(pair->str, vtctx->level)){
+            *level_num = pair->value;
+            return true;
+        }
     }
 
-    *level_num = (int)(major * 10 + minor);
-    return true;
+    return false;
 }
 
 /*
  * Returns true on success.
  *
- * If profileLevelVal is NULL and this method returns true, don't specify the
+ * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 */
-static bool getVTProfileLevel(AVCodecContext* avctx, CFStringRef* profileLevelVal){
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
-
+static bool get_vt_profile_level(AVCodecContext* avctx, CFStringRef* profile_level_val){
+    VTEncContext* vtctx = avctx->priv_data;
     int profile;
-    int level;
+    VTEncLevel level;
 
     if(!get_h264_profile(avctx, &profile)){
-        return AVERROR(EINVAL);
+        return false;
     }
 
     if(!get_h264_level(avctx, &level)){
-        return AVERROR(EINVAL);
+        return false;
     }
 
     if(profile == FF_LEVEL_UNKNOWN &&
-       level != FF_LEVEL_UNKNOWN)
+       level != kLevel_Auto)
     {
         profile = vtctx->has_b_frames ? FF_PROFILE_H264_MAIN : FF_PROFILE_H264_BASELINE;
     }
 
     switch(profile){
         case FF_PROFILE_UNKNOWN:
-            *profileLevelVal = NULL;
+            *profile_level_val = NULL;
             return true;
 
         case FF_PROFILE_H264_BASELINE:
             switch(level){
-                case FF_LEVEL_UNKNOWN: *profileLevelVal = kVTProfileLevel_H264_Baseline_AutoLevel; break;
-                case 13: *profileLevelVal = kVTProfileLevel_H264_Baseline_1_3; break;
-                case 30: *profileLevelVal = kVTProfileLevel_H264_Baseline_3_0; break;
-                case 31: *profileLevelVal = kVTProfileLevel_H264_Baseline_3_1; break;
-                case 32: *profileLevelVal = kVTProfileLevel_H264_Baseline_3_2; break;
-                case 40: *profileLevelVal = kVTProfileLevel_H264_Baseline_4_0; break;
-                case 41: *profileLevelVal = kVTProfileLevel_H264_Baseline_4_1; break;
-                case 42: *profileLevelVal = kVTProfileLevel_H264_Baseline_4_2; break;
-                case 50: *profileLevelVal = kVTProfileLevel_H264_Baseline_5_0; break;
-                case 51: *profileLevelVal = kVTProfileLevel_H264_Baseline_5_1; break;
-                case 52: *profileLevelVal = kVTProfileLevel_H264_Baseline_5_2; break;
+                case kLevel_Auto: *profile_level_val = kVTProfileLevel_H264_Baseline_AutoLevel; break;
+                case kLevel_1_3: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3; break;
+                case kLevel_3_0: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0; break;
+                case kLevel_3_1: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1; break;
+                case kLevel_3_2: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2; break;
+                case kLevel_4_0: *profile_level_val = kVTProfileLevel_H264_Baseline_4_0; break;
+                case kLevel_4_1: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1; break;
+                case kLevel_4_2: *profile_level_val = kVTProfileLevel_H264_Baseline_4_2; break;
+                case kLevel_5_0: *profile_level_val = kVTProfileLevel_H264_Baseline_5_0; break;
+                case kLevel_5_1: *profile_level_val = kVTProfileLevel_H264_Baseline_5_1; break;
+                case kLevel_5_2: *profile_level_val = kVTProfileLevel_H264_Baseline_5_2; break;
                 default:
                     av_log(avctx, AV_LOG_ERROR, "Unrecognized level %s (%d)\n", vtctx->level, level);
                     return false;
@@ -435,16 +470,16 @@ static bool getVTProfileLevel(AVCodecContext* avctx, CFStringRef* profileLevelVa
 
         case FF_PROFILE_H264_MAIN:
             switch(level){
-                case FF_LEVEL_UNKNOWN: *profileLevelVal = kVTProfileLevel_H264_Main_AutoLevel; break;
-                case 30: *profileLevelVal = kVTProfileLevel_H264_Main_3_0; break;
-                case 31: *profileLevelVal = kVTProfileLevel_H264_Main_3_1; break;
-                case 32: *profileLevelVal = kVTProfileLevel_H264_Main_3_2; break;
-                case 40: *profileLevelVal = kVTProfileLevel_H264_Main_4_0; break;
-                case 41: *profileLevelVal = kVTProfileLevel_H264_Main_4_1; break;
-                case 42: *profileLevelVal = kVTProfileLevel_H264_Main_4_2; break;
-                case 50: *profileLevelVal = kVTProfileLevel_H264_Main_5_0; break;
-                case 51: *profileLevelVal = kVTProfileLevel_H264_Main_5_1; break;
-                case 52: *profileLevelVal = kVTProfileLevel_H264_Main_5_2; break;
+                case kLevel_Auto: *profile_level_val = kVTProfileLevel_H264_Main_AutoLevel; break;
+                case kLevel_3_0: *profile_level_val = kVTProfileLevel_H264_Main_3_0; break;
+                case kLevel_3_1: *profile_level_val = kVTProfileLevel_H264_Main_3_1; break;
+                case kLevel_3_2: *profile_level_val = kVTProfileLevel_H264_Main_3_2; break;
+                case kLevel_4_0: *profile_level_val = kVTProfileLevel_H264_Main_4_0; break;
+                case kLevel_4_1: *profile_level_val = kVTProfileLevel_H264_Main_4_1; break;
+                case kLevel_4_2: *profile_level_val = kVTProfileLevel_H264_Main_4_2; break;
+                case kLevel_5_0: *profile_level_val = kVTProfileLevel_H264_Main_5_0; break;
+                case kLevel_5_1: *profile_level_val = kVTProfileLevel_H264_Main_5_1; break;
+                case kLevel_5_2: *profile_level_val = kVTProfileLevel_H264_Main_5_2; break;
                 default:
                     av_log(avctx, AV_LOG_ERROR, "Unrecognized level %s (%d)\n", vtctx->level, level);
                     return false;
@@ -453,16 +488,16 @@ static bool getVTProfileLevel(AVCodecContext* avctx, CFStringRef* profileLevelVa
 
         case FF_PROFILE_H264_HIGH:
             switch(level){
-                case FF_LEVEL_UNKNOWN: *profileLevelVal = kVTProfileLevel_H264_High_AutoLevel; break;
-                case 30: *profileLevelVal = kVTProfileLevel_H264_High_3_0; break;
-                case 31: *profileLevelVal = kVTProfileLevel_H264_High_3_1; break;
-                case 32: *profileLevelVal = kVTProfileLevel_H264_High_3_2; break;
-                case 40: *profileLevelVal = kVTProfileLevel_H264_High_4_0; break;
-                case 41: *profileLevelVal = kVTProfileLevel_H264_High_4_1; break;
-                case 42: *profileLevelVal = kVTProfileLevel_H264_High_4_2; break;
-                case 50: *profileLevelVal = kVTProfileLevel_H264_High_5_0; break;
-                case 51: *profileLevelVal = kVTProfileLevel_H264_High_5_1; break;
-                case 52: *profileLevelVal = kVTProfileLevel_H264_High_5_2; break;
+                case kLevel_Auto: *profile_level_val = kVTProfileLevel_H264_High_AutoLevel; break;
+                case kLevel_3_0: *profile_level_val = kVTProfileLevel_H264_High_3_0; break;
+                case kLevel_3_1: *profile_level_val = kVTProfileLevel_H264_High_3_1; break;
+                case kLevel_3_2: *profile_level_val = kVTProfileLevel_H264_High_3_2; break;
+                case kLevel_4_0: *profile_level_val = kVTProfileLevel_H264_High_4_0; break;
+                case kLevel_4_1: *profile_level_val = kVTProfileLevel_H264_High_4_1; break;
+                case kLevel_4_2: *profile_level_val = kVTProfileLevel_H264_High_4_2; break;
+                case kLevel_5_0: *profile_level_val = kVTProfileLevel_H264_High_5_0; break;
+                case kLevel_5_1: *profile_level_val = kVTProfileLevel_H264_High_5_1; break;
+                case kLevel_5_2: *profile_level_val = kVTProfileLevel_H264_High_5_2; break;
                 default:
                     av_log(avctx, AV_LOG_ERROR, "Unrecognized level %s (%d)\n", vtctx->level, level);
                     return false;
@@ -476,9 +511,13 @@ static bool getVTProfileLevel(AVCodecContext* avctx, CFStringRef* profileLevelVa
 }
 
 static av_cold int vtenc_init(AVCodecContext* avctx){
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
-    CMVideoCodecType codec_type;
-    OSStatus status;
+    CFMutableDictionaryRef enc_info;
+    CMVideoCodecType codec_type;
+    VTEncContext* vtctx = avctx->priv_data;
+    CFStringRef profile_level;
+    SInt32 bit_rate = avctx->bit_rate;
+    CFNumberRef bit_rate_num;
+    int status;
 
     codec_type = get_cm_codec_type(avctx->codec_id);
     if(!codec_type){
@@ -488,14 +527,13 @@ static av_cold int vtenc_init(AVCodecContext* avctx){
 
     vtctx->has_b_frames = avctx->has_b_frames && avctx->max_b_frames > 0;
 
-    CFStringRef profileLevel;
-    if(!getVTProfileLevel(avctx, &profileLevel)){
+    if(!get_vt_profile_level(avctx, &profile_level)){
         return AVERROR(EINVAL);
     }
 
     vtctx->session = NULL;
 
-    CFMutableDictionaryRef enc_info = CFDictionaryCreateMutable(
+    enc_info = CFDictionaryCreateMutable(
         kCFAllocatorDefault,
         20,
         &kCFCopyStringDictionaryKeyCallBacks,
@@ -541,13 +579,14 @@ static av_cold int vtenc_init(AVCodecContext* avctx){
     }
 #endif
 
+    CFRelease(enc_info);
+
     if(status || !vtctx->session){
         av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
         return AVERROR_EXTERNAL;
     }
 
-    SInt32 bit_rate = avctx->bit_rate;
-    CFNumberRef bit_rate_num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bit_rate);
+    bit_rate_num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &bit_rate);
     if(!bit_rate_num) return AVERROR(ENOMEM);
 
     status = VTSessionSetProperty(vtctx->session, kVTCompressionPropertyKey_AverageBitRate, bit_rate_num);
@@ -558,8 +597,8 @@ static av_cold int vtenc_init(AVCodecContext* avctx){
         return AVERROR_EXTERNAL;
     }
 
-    if(profileLevel){
-        status = VTSessionSetProperty(vtctx->session, kVTCompressionPropertyKey_ProfileLevel, profileLevel);
+    if(profile_level){
+        status = VTSessionSetProperty(vtctx->session, kVTCompressionPropertyKey_ProfileLevel, profile_level);
         if(status){
             av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
@@ -603,17 +642,18 @@ static void vtenc_get_frame_info(
     CMSampleBufferRef buffer,
     bool* is_key_frame)
 {
-    CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
-    CFIndex len = !attachments ? 0 : CFArrayGetCount(attachments);
+    CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
+    CFDictionaryRef attachment;
+    CFBooleanRef not_sync;
+    CFIndex len = !attachments ? 0 : CFArrayGetCount(attachments);
 
     if(!len){
         *is_key_frame = true;
         return;
     }
 
-    CFDictionaryRef attachment = (CFDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
+    attachment = (CFDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
 
-    CFBooleanRef not_sync;
     if(CFDictionaryGetValueIfPresent(attachment, kCMSampleAttachmentKey_NotSync, (const void**)&not_sync)){
         *is_key_frame = !CFBooleanGetValue(not_sync);
     }
@@ -622,22 +662,40 @@ static void vtenc_get_frame_info(
     }
 }
 
+/**
+ * Replaces length codes with H.264 Annex B start codes.
+ * length_code_size must equal sizeof(start_code).
+ * On failure, the contents of data may have been modified.
+ *
+ * @param length_code_size Byte length of each length code
+ * @param data Call with NAL units prefixed with length codes.
+ *             On success, the length codes are replaced with
+ *             start codes.
+ * @param size Length of data, excluding any padding.
+ * @return 0 on success
+ *         AVERROR_BUFFER_TOO_SMALL if length code size is smaller
+ *         than a start code or if a length_code in data specifies
+ *         data beyond the end of its buffer.
+ */
 static int replace_length_codes(size_t length_code_size, uint8_t* data, size_t size){
+    size_t remaining_size = size;
+
     if(length_code_size != sizeof(start_code)){
-        av_log(NULL, AV_LOG_ERROR, "lc != sizeof\n");
+        av_log(NULL, AV_LOG_ERROR, "Start code size and length code size not equal.\n");
         return AVERROR_BUFFER_TOO_SMALL;
     }
 
-    size_t remaining_size = size;
     while(remaining_size > 0){
         size_t box_len = 0;
-        for(size_t i = 0; i < length_code_size; i++){
+        size_t i;
+
+        for(i = 0; i < length_code_size; i++){
            box_len <<= 8;
            box_len |= data[i];
        }
 
        if(remaining_size < box_len + sizeof(start_code)){
-            av_log(NULL, AV_LOG_ERROR, "too small 2\n");
+            av_log(NULL, AV_LOG_ERROR, "Length is out of range.\n");
            AVERROR_BUFFER_TOO_SMALL;
        }
 
@@ -649,7 +707,25 @@ static int replace_length_codes(size_t length_code_size, uint8_t* data, size_t s
     return 0;
 }
 
-//expects zeroed memory
+/**
+ * Copies NAL units and replaces length codes with
+ * H.264 Annex B start codes. On failure, the contents of
+ * dst_data may have been modified.
+ *
+ * @param length_code_size Byte length of each length code
+ * @param src_data NAL units prefixed with length codes.
+ * @param src_size Length of buffer, excluding any padding.
+ * @param dst_data Must be zeroed before calling this function.
+ *                 Contains the copied NAL units prefixed with
+ *                 start codes when the function returns
+ *                 successfully.
+ * @param dst_size Length of dst_data
+ * @return 0 on success
+ *         AVERROR_INVALIDDATA if length_code_size is invalid
+ *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
+ *         or if a length_code in src_data specifies data beyond
+ *         the end of its buffer.
+ */
 static int copy_replace_length_codes(
     size_t length_code_size,
     const uint8_t* src_data,
@@ -657,21 +733,29 @@
     uint8_t* dst_data,
     size_t dst_size)
 {
+    size_t remaining_src_size = src_size;
+    size_t remaining_dst_size = dst_size;
+
     if(length_code_size > 4){
         return AVERROR_INVALIDDATA;
     }
 
-    size_t remaining_src_size = src_size;
-    size_t remaining_dst_size = dst_size;
     while(remaining_src_size > 0){
+        size_t curr_src_len;
+        size_t curr_dst_len;
         size_t box_len = 0;
-        for(size_t i = 0; i < length_code_size; i++){
+        size_t i;
+
+        uint8_t* dst_box;
+        const uint8_t* src_box;
+
+        for(i = 0; i < length_code_size; i++){
             box_len <<= 8;
             box_len |= src_data[i];
         }
 
-        size_t curr_src_len = box_len + length_code_size;
-        size_t curr_dst_len = box_len + sizeof(start_code);
+        curr_src_len = box_len + length_code_size;
+        curr_dst_len = box_len + sizeof(start_code);
 
         if(remaining_src_size < curr_src_len){
             return AVERROR_BUFFER_TOO_SMALL;
@@ -681,8 +765,8 @@ static int copy_replace_length_codes(
             return AVERROR_BUFFER_TOO_SMALL;
         }
 
-        uint8_t* dst_box = dst_data + sizeof(start_code);
-        const uint8_t* src_box = src_data + length_code_size;
+        dst_box = dst_data + sizeof(start_code);
+        src_box = src_data + length_code_size;
 
         memcpy(dst_data, start_code, sizeof(start_code));
         memcpy(dst_box, src_box, box_len);
@@ -702,12 +786,23 @@ static int vtenc_cm_to_avpacket(
     CMSampleBufferRef sample_buffer,
     AVPacket* pkt)
 {
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
+    VTEncContext* vtctx = avctx->priv_data;
+
+    int status;
+    bool is_key_frame;
+    bool add_header;
+    char* buf_data;
+    size_t length_code_size;
+    size_t header_size = 0;
+    size_t in_buf_size;
+    int64_t dts_delta;
+    int64_t time_base_num;
+    CMTime pts;
+    CMTime dts;
+
+    CMBlockBufferRef block;
+    CMVideoFormatDescriptionRef vid_fmt;
 
-    bool is_key_frame;
-    bool add_header;
-    size_t length_code_size;
-    int status;
     vtenc_get_frame_info(sample_buffer, &is_key_frame);
     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
@@ -715,26 +810,23 @@ static int vtenc_cm_to_avpacket(
 
     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
 
-    size_t header_size = 0;
-    CMVideoFormatDescriptionRef vid_fmt;
     if(add_header){
         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
         if(!vid_fmt){
             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
         }
 
-        int status = get_params_info(avctx, vid_fmt, &header_size);
+        int status = get_params_size(avctx, vid_fmt, &header_size);
         if(status) return status;
     }
 
-    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
+    block = CMSampleBufferGetDataBuffer(sample_buffer);
     if(!block){
         av_log(avctx, AV_LOG_ERROR, "Could not get block buffer from sample buffer.\n");
         return AVERROR_EXTERNAL;
     }
 
-    size_t in_buf_size;
-    char* buf_data;
+
     status = CMBlockBufferGetDataPointer(block, 0, &in_buf_size, NULL, &buf_data);
     if(status){
         av_log(avctx, AV_LOG_ERROR, "Error: cannot get data pointer: %d\n", status);
@@ -749,8 +841,6 @@ static int vtenc_cm_to_avpacket(
     av_init_packet(pkt);
 
     if(can_reuse_cmbuffer){
-        CFRetain(block);
-
         AVBufferRef* buf_ref = av_buffer_create(
             buf_data,
             out_buf_size,
@@ -759,10 +849,9 @@ static int vtenc_cm_to_avpacket(
             0
         );
 
-        if(!buf_ref){
-            CFRelease(block);
-            return AVERROR(ENOMEM);
-        }
+        if(!buf_ref) return AVERROR(ENOMEM);
+
+        CFRetain(block);
 
         pkt->buf  = buf_ref;
         pkt->data = buf_data;
@@ -808,11 +897,11 @@ static int vtenc_cm_to_avpacket(
         pkt->flags |= AV_PKT_FLAG_KEY;
     }
 
-    const CMTime pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
-    const CMTime dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
+    pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
+    dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
 
-    int64_t dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
-    int64_t time_base_num = avctx->time_base.num;
+    dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
+    time_base_num = avctx->time_base.num;
     pkt->pts = pts.value / time_base_num;
     pkt->dts = dts.value / time_base_num - dts_delta;
@@ -828,8 +917,7 @@ static int get_cv_pixel_info(
     size_t* heights,
     size_t* strides)
 {
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
-
+    VTEncContext* vtctx = avctx->priv_data;
     int av_format = avctx->pix_fmt;
     int av_color_range = avctx->color_range;
@@ -919,15 +1007,30 @@ static int get_cv_pixel_info(
     return 0;
 }
 
-static int vtenc_send_frame(AVCodecContext* avctx, VTEncContext* vtctx, const AVFrame* frame){
-    av_assert0(frame);
+static void free_avframe(
+    void *CV_NULLABLE release_ctx,
+    const void *CV_NULLABLE data,
+    size_t size,
+    size_t plane_count,
+    const void *CV_NULLABLE plane_addresses[])
+{
+    AVFrame* frame = release_ctx;
+    av_frame_free(&frame);
+}
 
+static int vtenc_send_frame(AVCodecContext* avctx, VTEncContext* vtctx, const AVFrame* frame){
     int plane_count;
     int color;
     size_t widths [AV_NUM_DATA_POINTERS];
     size_t heights[AV_NUM_DATA_POINTERS];
     size_t strides[AV_NUM_DATA_POINTERS];
-    int status = get_cv_pixel_info(avctx, frame, &color, &plane_count, widths, heights, strides);
+    int status;
+    CVPixelBufferRef cv_img;
+    CMTime time;
+
+    av_assert0(frame);
+
+    status = get_cv_pixel_info(avctx, frame, &color, &plane_count, widths, heights, strides);
     if(status){
         av_log(
             avctx,
@@ -940,21 +1043,29 @@ static int vtenc_send_frame(AVCodecContext* avctx, VTEncContext* vtctx, const AV
         return AVERROR_EXTERNAL;
     }
 
-    CVPixelBufferRef cv_img;
+    AVFrame* enc_frame = av_frame_alloc();
+    if(!enc_frame) return AVERROR(ENOMEM);
+
+    status = av_frame_ref(enc_frame, frame);
+    if(status){
+        av_frame_free(&enc_frame);
+        return status;
+    }
+
     status = CVPixelBufferCreateWithPlanarBytes(
         kCFAllocatorDefault,
-        frame->width,
-        frame->height,
+        enc_frame->width,
+        enc_frame->height,
         color,
         NULL,
         0,
         plane_count,
-        (void**)frame->data,
+        (void**)enc_frame->data,
         widths,
         heights,
         strides,
-        NULL,
-        NULL,
+        free_avframe,
+        enc_frame,
         NULL,
         &cv_img
     );
@@ -964,8 +1075,7 @@ static int vtenc_send_frame(AVCodecContext* avctx, VTEncContext* vtctx, const AV
         return AVERROR_EXTERNAL;
     }
 
-    CMTime time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
-    VTEncodeInfoFlags flags = 0;
+    time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
     status = VTCompressionSessionEncodeFrame(
         vtctx->session,
         cv_img,
@@ -973,7 +1083,7 @@ static int vtenc_send_frame(AVCodecContext* avctx, VTEncContext* vtctx, const AV
         kCMTimeInvalid,
         NULL,
         NULL,
-        &flags
+        NULL
     );
 
     CFRelease(cv_img);
@@ -992,8 +1102,10 @@ static av_cold int vtenc_frame(
     const AVFrame* frame,
     int* got_packet)
 {
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
+    VTEncContext* vtctx = avctx->priv_data;
+    bool get_frame;
     int status;
+    CMSampleBufferRef buf = NULL;
 
     if(frame){
         status = vtenc_send_frame(avctx, vtctx, frame);
@@ -1025,13 +1137,12 @@ static av_cold int vtenc_frame(
     }
 
     *got_packet = 0;
-    bool get_frame = vtctx->dts_delta >= 0 || !frame;
+    get_frame = vtctx->dts_delta >= 0 || !frame;
     if(!get_frame){
         status = 0;
         goto end_nopkt;
     }
 
-    CMSampleBufferRef buf = NULL;
     status = vtenc_q_pop(vtctx, !frame, &buf);
     if(status) goto end_nopkt;
     if(!buf) goto end_nopkt;
@@ -1049,7 +1160,7 @@ end_nopkt:
 }
 
 static av_cold int vtenc_close(AVCodecContext* avctx){
-    VTEncContext* vtctx = (VTEncContext*)avctx->priv_data;
+    VTEncContext* vtctx = avctx->priv_data;
 
     if(!vtctx->session) return 0;
-- 
2.4.9 (Apple Git-60)