On 2017/7/30 8:07, Mark Thompson wrote:
> On 28/07/17 07:01, Jun Zhao wrote:
>> From d5414b451012b3a0169740a26f452785eb44cce5 Mon Sep 17 00:00:00 2001
>> From: Jun Zhao <jun.z...@intel.com>
>> Date: Fri, 28 Jul 2017 01:39:27 -0400
>> Subject: [PATCH] examples/vaapi_enc: Add a VAAPI encoding example.
>>
>> Add a VAAPI encoding example.
>>
>> Use hwupload to load the raw data into a HW surface; usage is
>> like this: ./vaapi_enc 1920 1080 input.yuv test.h264
>>
>> Signed-off-by: Liu, Kaixuan <kaixuan....@intel.com>
>> Signed-off-by: Jun Zhao <jun.z...@intel.com>
>> ---
>>  doc/examples/vaapi_enc.c | 291 +++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 291 insertions(+)
>>  create mode 100644 doc/examples/vaapi_enc.c
>
> A general thought: do you actually want to use lavfi here? All it's really
> doing is the hw frame creation and upload, which would be shorter to
> implement directly (av_hwframe_ctx_alloc(), av_hwframe_ctx_init(),
> av_hwframe_transfer_data()). If the example might be extended with more
> stuff going on in filters then obviously the lavfi stuff is needed, but it
> seems overcomplicated if the intent is just to demonstrate encode.

From the API point of view, I don't want to use lavfi for the VAAPI encode
example; I would prefer a simpler API, or a simpler sequence of steps, over
using lavfi to load YUV from CPU memory into a GPU surface. Can we provide a
simple API or recipe to load YUV into a HW surface in this case? Even with
the av_hwframe_xxx interface, it's not an easy task for the caller.
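For reference, my understanding of the direct path is roughly the following
(a minimal, untested sketch; avctx, hw_device_ctx, width/height and
sw_frame/hw_frame are the names from the patch, and the pool size is an
arbitrary choice):

    /* Sketch: replace the filter graph with a direct hwframe-context setup. */
    AVBufferRef *hw_frames_ref;
    AVHWFramesContext *frames_ctx;
    int err;

    if (!(hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx)))
        return AVERROR(ENOMEM);
    frames_ctx = (AVHWFramesContext *)hw_frames_ref->data;
    frames_ctx->format    = AV_PIX_FMT_VAAPI;  /* surface format the encoder sees */
    frames_ctx->sw_format = AV_PIX_FMT_NV12;   /* layout of the data in the surfaces */
    frames_ctx->width     = width;
    frames_ctx->height    = height;
    frames_ctx->initial_pool_size = 20;        /* arbitrary pool size for this sketch */
    if ((err = av_hwframe_ctx_init(hw_frames_ref)) < 0)
        return err;
    avctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);

    /* ... then, per input frame: get a surface and copy the sw frame into it. */
    if ((err = av_hwframe_get_buffer(hw_frames_ref, hw_frame, 0)) < 0 ||
        (err = av_hwframe_transfer_data(hw_frame, sw_frame, 0)) < 0)
        return err;

It is shorter than the filter-graph setup, but the caller still needs to
understand frames contexts, pool sizing and the transfer call, which is the
usability point above.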
>
>
>> diff --git a/doc/examples/vaapi_enc.c b/doc/examples/vaapi_enc.c
>> new file mode 100644
>> index 0000000000..e26db0e343
>> --- /dev/null
>> +++ b/doc/examples/vaapi_enc.c
>> @@ -0,0 +1,291 @@
>> +/*
>> + * Video Acceleration API (video encoding) encode sample
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + */
>> +
>> +/**
>> + * @file
>> + * Intel VAAPI-accelerated encoding example.
>> + *
>> + * @example vaapi_enc.c
>> + * This example shows how to do VAAPI-accelerated encoding. now only support NV12
>> + * raw file, usage: vaapi_enc 1920 1080 input.yuv output.h264
>> + */
>> +
>> +#include <stdio.h>
>> +#include <string.h>
>> +
>> +#include <libavcodec/avcodec.h>
>> +#include <libavformat/avformat.h>
>> +#include <libavfilter/avfiltergraph.h>
>> +#include <libavfilter/buffersink.h>
>> +#include <libavfilter/buffersrc.h>
>> +#include <libavutil/pixdesc.h>
>> +#include <libavutil/hwcontext.h>
>> +
>> +typedef struct FilterContext {
>> +    AVFilterContext *buffersink_ctx;
>> +    AVFilterContext *buffersrc_ctx;
>> +    AVFilterGraph *filter_graph;
>> +} FilterContext;
>> +
>> +static int width, height;
>> +static AVBufferRef *hw_device_ctx = NULL;
>> +
>> +static int
>> +init_filter(FilterContext *filter_ctx, char *args, AVBufferRef *hw_device_ctx)
>> +{
>> +    char filter_spec[] = "format=nv12,hwupload";
>> +    int ret = 0, i = 0;
>> +    AVFilter *buffersrc, *buffersink;
>> +    AVFilterContext *buffersrc_ctx, *buffersink_ctx;
>> +    AVFilterInOut *outputs = avfilter_inout_alloc();
>> +    AVFilterInOut *inputs  = avfilter_inout_alloc();
>> +    AVFilterGraph *filter_graph = avfilter_graph_alloc();
>> +
>> +    buffersrc = avfilter_get_by_name("buffer");
>> +    buffersink = avfilter_get_by_name("buffersink");
>> +    if (!buffersrc || !buffersink) {
>> +        av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
>> +        ret = AVERROR_UNKNOWN;
>> +        goto fail;
>> +    }
>> +
>> +    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
>> +                                       args, NULL, filter_graph);
>> +    if (ret < 0) {
>> +        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
>> +        goto fail;
>> +    }
>> +    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
>> +                                       NULL, NULL, filter_graph);
>> +    if (ret < 0) {
>> +        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink.\n");
>> +        goto fail;
>> +    }
>> +
>> +    outputs->name       = av_strdup("in");
>> +    outputs->filter_ctx = buffersrc_ctx;
>> +    outputs->pad_idx    = 0;
>> +    outputs->next       = NULL;
>> +    inputs->name       = av_strdup("out");
>> +    inputs->filter_ctx = buffersink_ctx;
>> +    inputs->pad_idx    = 0;
>> +    inputs->next       = NULL;
>> +    if (!outputs->name || !inputs->name) {
>> +        ret = AVERROR(ENOMEM);
>> +        goto fail;
>> +    }
>> +
>> +    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
>> +                                        &inputs, &outputs, NULL)) < 0)
>> +        goto fail;
>> +    if (hw_device_ctx) {
>> +        for (i = 0; i < filter_graph->nb_filters; i++) {
>> +            filter_graph->filters[i]->hw_device_ctx = av_buffer_ref(hw_device_ctx);
>> +        }
>> +    }
>> +
>> +    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
>> +        goto fail;
>> +
>> +    filter_ctx->buffersrc_ctx  = buffersrc_ctx;
>> +    filter_ctx->buffersink_ctx = buffersink_ctx;
>> +    filter_ctx->filter_graph   = filter_graph;
>> +
>> +fail:
>> +    avfilter_inout_free(&inputs);
>> +    avfilter_inout_free(&outputs);
>> +    return ret;
>> +}
>> +
>> +static int encode_write(AVCodecContext *avctx, AVFrame *frame, FILE *fout)
>> +{
>> +    int ret = 0;
>> +    AVPacket enc_pkt;
>> +
>> +    av_init_packet(&enc_pkt);
>> +    enc_pkt.data = NULL;
>> +    enc_pkt.size = 0;
>> +
>> +    if ((ret = avcodec_send_frame(avctx, frame)) < 0)
>> +        goto end;
>> +    while (1) {
>> +        ret = avcodec_receive_packet(avctx, &enc_pkt);
>> +        if (!ret) {
>> +            enc_pkt.stream_index = 0;
>> +            ret = fwrite(enc_pkt.data, enc_pkt.size, 1, fout);
>> +        } else {
>> +            break;
>> +        }
>> +    }
>> +
>> +end:
>> +    ret = ((ret == AVERROR(EAGAIN)) ? 0 : -1);
>> +    return ret;
>> +}
>> +
>> +int main(int argc, char *argv[])
>> +{
>> +    int ret, size;
>> +    FILE *fin, *fout;
>> +    AVFrame *sw_frame, *hw_frame;
>> +    AVCodecContext *avctx = NULL;
>> +    FilterContext *filter_ctx;
>> +    AVCodec *codec = NULL;
>> +    uint8_t *frame_buf;
>> +    const char *input_file, *output_file;
>> +    const char *enc_name = "h264_vaapi";
>> +    char args[512];
>> +    int count = 0;
>
> Print a usage line here if there aren't the expected number of arguments?

OK.

>
>> +
>> +    width  = atoi(argv[1]);
>> +    height = atoi(argv[2]);
>> +    input_file  = argv[3];
>> +    output_file = argv[4];
>> +
>> +    size = width * height;
>> +    frame_buf = malloc((size * 3) / 2); /* size for nv12 frame */
>> +    fin  = fopen(input_file, "r");
>> +    fout = fopen(output_file, "w+b");
>> +    if (!fin || !fout) {
>> +        fprintf(stderr, "Fail to open input or output file.\n");
>> +        ret = -1;
>> +        goto close;
>> +    }
>> +
>> +    av_register_all();
>> +    avfilter_register_all();
>> +
>> +    ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
>> +                                 NULL, NULL, 0);
>> +    if (ret < 0) {
>> +        fprintf(stderr, "Failed to create a VAAPI device.\n");
>> +        goto close;
>> +    }
>> +
>> +    if (!(codec = avcodec_find_encoder_by_name(enc_name))) {
>> +        fprintf(stderr, "Could not find encoder.\n");
>> +        ret = -1;
>> +        goto close;
>> +    }
>> +
>> +    if (!(avctx = avcodec_alloc_context3(codec))) {
>> +        ret = AVERROR(ENOMEM);
>> +        goto close;
>> +    }
>> +
>> +    avctx->width     = width;
>> +    avctx->height    = height;
>> +    avctx->time_base = (AVRational){1, 25};
>> +    avctx->pix_fmt   = AV_PIX_FMT_VAAPI;
>> +
>> +    /* create filters and binding HWDevice */
>> +    snprintf(args, sizeof(args),
>> +             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:frame_rate=%d/%d",
>> +             avctx->width, avctx->height, AV_PIX_FMT_NV12,
>> +             avctx->time_base.num, avctx->time_base.den,
>> +             avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den,
>> +             avctx->framerate.num, avctx->framerate.den);
>
> SAR and framerate aren't initialised? SAR probably doesn't matter, but I
> think framerate wants to be 1/time_base here to make it clearly CFR. Also,
> since it does end up in the output stream, having it be configurable as
> another command-line option might be nice.

Will double-check this part.
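Presumably something like this before building the filter args, so the
stream is clearly CFR (untested sketch; treating SAR as square pixels is an
assumption):

    avctx->framerate = av_inv_q(avctx->time_base);      /* 1/25 time base -> 25/1 fps */
    avctx->sample_aspect_ratio = (AVRational){ 1, 1 };  /* assume square pixels */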
>
>> +
>> +    if (!(filter_ctx = av_malloc(sizeof(*filter_ctx)))) {
>> +        ret = AVERROR(ENOMEM);
>> +        goto close;
>> +    }
>> +
>> +    if ((ret = init_filter(filter_ctx, args, hw_device_ctx)) < 0) {
>> +        fprintf(stderr, "Failed to initialize the filtering context.\n");
>> +        goto close;
>> +    }
>> +
>> +    avctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx
>> +                                         (filter_ctx->buffersink_ctx));
>
> Can fail.

Will add error handling.

>
>> +    if ((ret = avcodec_open2(avctx, codec, NULL)) < 0)
>> +        fprintf(stderr, "Cannot open video encoder codec.\n");
>> +
>> +    while (fread(frame_buf, (size * 3) / 2, 1, fin) > 0) {
>
> Will encode a combination of old and new data on a short read.
>
>> +        if (!(sw_frame = av_frame_alloc())) {
>> +            ret = AVERROR(ENOMEM);
>> +            goto close;
>> +        }
>> +        sw_frame->data[0] = frame_buf;
>> +        sw_frame->data[1] = frame_buf + size;
>> +        sw_frame->linesize[0] = width;
>> +        sw_frame->linesize[1] = width;
>> +        sw_frame->width  = width;
>> +        sw_frame->height = height;
>> +        sw_frame->format = AV_PIX_FMT_NV12;
>
> I think it would be nicer to only use refcounted frames here. Allocate, set
> width/height/format, call av_frame_get_buffer(), then fread() into the data
> planes.

Will clean this part; see the sketch at the end of this mail.

>
>> +        /* push the sw frame into the filtergraph */
>> +        ret = av_buffersrc_add_frame_flags(filter_ctx->buffersrc_ctx,
>> +                                           sw_frame, 0);
>> +        if (ret < 0) {
>> +            fprintf(stderr, "Error while feeding the filtergraph.\n");
>> +            goto close;
>> +        }
>> +        /* pull hw frames from the filtergraph */
>> +        while (1) {
>> +            if (!(hw_frame = av_frame_alloc())) {
>> +                ret = AVERROR(ENOMEM);
>> +                goto close;
>> +            }
>> +            if ((ret = (av_buffersink_get_frame(filter_ctx->buffersink_ctx, hw_frame))) < 0) {
>> +                /* if no more frames for output - returns AVERROR(EAGAIN)
>> +                 * if flushed and no more frames for output - returns AVERROR_EOF
>> +                 * rewrite retcode to 0 to show it as normal procedure completion
>> +                 */
>> +                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
>> +                    ret = 0;
>> +                av_frame_free(&hw_frame);
>> +                break;
>> +            }
>> +            count++;
>> +            if (count % avctx->gop_size == 1) {
>> +                hw_frame->pict_type = AV_PICTURE_TYPE_I;
>> +            } else {
>> +                hw_frame->pict_type = AV_PICTURE_TYPE_B;
>> +            }
>
> The GOP setting is already used inside the encoder, so this won't do
> anything? Showing that pict_type can be set to generate intra frames might
> be useful for an example, but I think this needs a slightly more meaningful
> mechanism.
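Maybe something along these lines, driven by a hypothetical idr_interval
command-line option rather than the encoder's own gop_size (untested
sketch):

            /* idr_interval: hypothetical new command-line option.
             * Force a keyframe every idr_interval frames; otherwise leave
             * pict_type as NONE so the encoder chooses the type itself. */
            if (idr_interval > 0 && count % idr_interval == 0)
                hw_frame->pict_type = AV_PICTURE_TYPE_I;
            else
                hw_frame->pict_type = AV_PICTURE_TYPE_NONE;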
>
>> +
>> +            if ((ret = (encode_write(avctx, hw_frame, fout))) < 0) {
>> +                fprintf(stderr, "Failed to encode.\n");
>> +                goto close;
>> +            }
>> +            av_frame_free(&hw_frame);
>> +        }
>> +        av_frame_free(&sw_frame);
>> +    }
>> +
>> +    /* flush encode */
>> +    ret = encode_write(avctx, NULL, fout);
>> +
>> +close:
>> +    fclose(fin);
>> +    fclose(fout);
>> +    av_frame_free(&sw_frame);
>> +    av_frame_free(&hw_frame);
>> +    avcodec_free_context(&avctx);
>> +    if (filter_ctx) {
>> +        avfilter_free(filter_ctx->buffersrc_ctx);
>> +        avfilter_free(filter_ctx->buffersink_ctx);
>> +        avfilter_graph_free(&(filter_ctx->filter_graph));
>> +        av_free(filter_ctx);
>> +    }
>> +    av_buffer_unref(&hw_device_ctx);
>> +    free(frame_buf);
>> +
>> +    return ret;
>> +}
>> --
>> 2.11.0
>>
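For the refcounted read loop mentioned above, I'm thinking of something like
the following (untested sketch; read_nv12_frame is a new helper that does
not exist in the patch, and it also covers the short-read case):

    /* Read one NV12 frame into a refcounted AVFrame. Returns 0 on success,
     * AVERROR_EOF on EOF or a short read, so stale data is never encoded.
     * Rows are read one at a time because linesize may include padding. */
    static int read_nv12_frame(FILE *fin, AVFrame *frame)
    {
        int y;

        for (y = 0; y < frame->height; y++)
            if (fread(frame->data[0] + y * frame->linesize[0], frame->width, 1, fin) != 1)
                return AVERROR_EOF;
        for (y = 0; y < frame->height / 2; y++)
            if (fread(frame->data[1] + y * frame->linesize[1], frame->width, 1, fin) != 1)
                return AVERROR_EOF;
        return 0;
    }

    /* ... and in main(), instead of the fread()/static-buffer loop: */
    while (1) {
        if (!(sw_frame = av_frame_alloc())) {
            ret = AVERROR(ENOMEM);
            goto close;
        }
        sw_frame->width  = width;
        sw_frame->height = height;
        sw_frame->format = AV_PIX_FMT_NV12;
        if ((ret = av_frame_get_buffer(sw_frame, 32)) < 0) /* 32-byte alignment */
            goto close;
        if (read_nv12_frame(fin, sw_frame) < 0) {
            av_frame_free(&sw_frame);   /* EOF or short read: stop cleanly */
            break;
        }
        /* ... push sw_frame into the filtergraph as before ... */
    }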