The main benefit comes from propagating container-level metadata like HDR (mastering display and content light level), which is more commonly used than the equivalent Metadata OBUs.
Signed-off-by: James Almer <jamr...@gmail.com>
---
 libavcodec/libdav1d.c | 94 +++++++++++++++++++++----------------------
 1 file changed, 46 insertions(+), 48 deletions(-)

diff --git a/libavcodec/libdav1d.c b/libavcodec/libdav1d.c
index 3af7ef4edc..1b9289824f 100644
--- a/libavcodec/libdav1d.c
+++ b/libavcodec/libdav1d.c
@@ -119,9 +119,45 @@ static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
     av_buffer_unref(&buf);
 }
 
+static void libdav1d_init_params(AVCodecContext *c, Dav1dSequenceHeader *seq)
+{
+    c->profile = seq->profile;
+    c->level = ((seq->operating_points[0].major_level - 2) << 2)
+               | seq->operating_points[0].minor_level;
+
+    switch (seq->chr) {
+    case DAV1D_CHR_VERTICAL:
+        c->chroma_sample_location = AVCHROMA_LOC_LEFT;
+        break;
+    case DAV1D_CHR_COLOCATED:
+        c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
+        break;
+    }
+    c->colorspace = (enum AVColorSpace) seq->mtrx;
+    c->color_primaries = (enum AVColorPrimaries) seq->pri;
+    c->color_trc = (enum AVColorTransferCharacteristic) seq->trc;
+    c->color_range = seq->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
+
+    if (seq->layout == DAV1D_PIXEL_LAYOUT_I444 &&
+        seq->mtrx == DAV1D_MC_IDENTITY &&
+        seq->pri == DAV1D_COLOR_PRI_BT709 &&
+        seq->trc == DAV1D_TRC_SRGB)
+        c->pix_fmt = pix_fmt_rgb[seq->hbd];
+    else
+        c->pix_fmt = pix_fmt[seq->layout][seq->hbd];
+
+    if (seq->num_units_in_tick && seq->time_scale) {
+        av_reduce(&c->framerate.den, &c->framerate.num,
+                  seq->num_units_in_tick, seq->time_scale, INT_MAX);
+        if (seq->equal_picture_interval)
+            c->ticks_per_frame = seq->num_ticks_per_picture;
+    }
+}
+
 static av_cold int libdav1d_init(AVCodecContext *c)
 {
     Libdav1dContext *dav1d = c->priv_data;
+    Dav1dSequenceHeader seq;
     Dav1dSettings s;
     int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
     int res;
@@ -198,10 +234,6 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
             return res;
         }
 
-        data->m.timestamp = pkt.pts;
-        data->m.offset = pkt.pos;
-        data->m.duration = pkt.duration;
-
         pkt.buf = NULL;
         av_packet_unref(&pkt);
 
@@ -258,9 +290,11 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
     frame->linesize[1] = p->stride[1];
     frame->linesize[2] = p->stride[1];
 
-    c->profile = p->seq_hdr->profile;
-    c->level = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
-               | p->seq_hdr->operating_points[0].minor_level;
+    libdav1d_init_params(c, p->seq_hdr);
+    res = ff_decode_frame_props(c, frame);
+    if (res < 0)
+        goto fail;
+
     frame->width = p->p.w;
     frame->height = p->p.h;
     if (c->width != p->p.w || c->height != p->p.h) {
@@ -276,50 +310,12 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
                   INT_MAX);
     ff_set_sar(c, frame->sample_aspect_ratio);
 
-    switch (p->seq_hdr->chr) {
-    case DAV1D_CHR_VERTICAL:
-        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
-        break;
-    case DAV1D_CHR_COLOCATED:
-        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
-        break;
-    }
-    frame->colorspace = c->colorspace = (enum AVColorSpace) p->seq_hdr->mtrx;
-    frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p->seq_hdr->pri;
-    frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p->seq_hdr->trc;
-    frame->color_range = c->color_range = p->seq_hdr->color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
-
-    if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
-        p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
-        p->seq_hdr->pri == DAV1D_COLOR_PRI_BT709 &&
-        p->seq_hdr->trc == DAV1D_TRC_SRGB)
-        frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
-    else
-        frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];
-
     if (p->m.user_data.data)
         memcpy(&frame->reordered_opaque, p->m.user_data.data, sizeof(frame->reordered_opaque));
     else
         frame->reordered_opaque = AV_NOPTS_VALUE;
 
-    if (p->seq_hdr->num_units_in_tick && p->seq_hdr->time_scale) {
-        av_reduce(&c->framerate.den, &c->framerate.num,
-                  p->seq_hdr->num_units_in_tick, p->seq_hdr->time_scale, INT_MAX);
-        if (p->seq_hdr->equal_picture_interval)
-            c->ticks_per_frame = p->seq_hdr->num_ticks_per_picture;
-    }
-
-    // match timestamps and packet size
-    frame->pts = frame->best_effort_timestamp = p->m.timestamp;
-#if FF_API_PKT_PTS
-FF_DISABLE_DEPRECATION_WARNINGS
-    frame->pkt_pts = p->m.timestamp;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-    frame->pkt_dts = p->m.timestamp;
-    frame->pkt_pos = p->m.offset;
-    frame->pkt_size = p->m.size;
-    frame->pkt_duration = p->m.duration;
+    frame->best_effort_timestamp = frame->pts;
     frame->key_frame = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;
 
     switch (p->frame_hdr->frame_type) {
@@ -338,7 +334,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
         goto fail;
     }
 
-    if (p->mastering_display) {
+    // Give priority to metadata at the container level.
+    // See "AV1 Codec ISO Media File Format Binding".
+    if (p->mastering_display && !av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA)) {
         AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
         if (!mastering) {
             res = AVERROR(ENOMEM);
@@ -358,7 +356,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
         mastering->has_primaries = 1;
         mastering->has_luminance = 1;
     }
-    if (p->content_light) {
+    if (p->content_light && !av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL)) {
         AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
         if (!light) {
             res = AVERROR(ENOMEM);
-- 
2.27.0
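
Not part of the patch, just a hedged sketch for context: once ff_decode_frame_props() has filled the frame from the packet side data set by the demuxer, the !av_frame_get_side_data() checks above let container-level values take priority, and API users read the result the same way no matter whether it came from the container or from the Metadata OBUs. The helper name below is hypothetical, the decode loop and error handling are omitted, and it only relies on public lavu API (av_frame_get_side_data() plus the mastering display / content light level structs):

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/mastering_display_metadata.h>

/* Hypothetical caller-side helper: print the HDR side data attached to a
 * decoded AVFrame, e.g. one returned by avcodec_receive_frame(). */
static void dump_hdr_metadata(const AVFrame *frame)
{
    const AVFrameSideData *sd;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
    if (sd) {
        const AVMasteringDisplayMetadata *mdm = (const AVMasteringDisplayMetadata *)sd->data;
        if (mdm->has_luminance)
            printf("mastering display luminance: %g-%g cd/m2\n",
                   av_q2d(mdm->min_luminance), av_q2d(mdm->max_luminance));
    }

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
    if (sd) {
        const AVContentLightMetadata *cll = (const AVContentLightMetadata *)sd->data;
        printf("MaxCLL: %u, MaxFALL: %u\n", cll->MaxCLL, cll->MaxFALL);
    }
}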