Previously only little endian was supported, because of a FATE problem.
Generally, native-endian code is faster.
Requires 'tests/fate-run: support both le/be formats on pixfmts'.
Needs someone to test it on a BE machine.

Signed-off-by: Muhammad Faiz <mfc...@gmail.com>
---
 libavfilter/drawutils.c | 43 ++++++++++++++++----------
 libavfilter/vf_lut.c    | 81 +++++++++++++++++++++----------------------
 2 files changed, 62 insertions(+), 62 deletions(-)

diff --git a/libavfilter/drawutils.c b/libavfilter/drawutils.c
index e533040..610f8b6 100644
--- a/libavfilter/drawutils.c
+++ b/libavfilter/drawutils.c
@@ -173,6 +173,22 @@ void ff_copy_rectangle(uint8_t *dst[4], int dst_linesize[4],
     }
 }
 
+static int is_native_endian(const AVPixFmtDescriptor *desc)
+{
+    int len = strlen(desc->name);
+#if HAVE_BIGENDIAN
+    const char *native = "be", *foreign = "le";
+#else
+    const char *native = "le", *foreign = "be";
+#endif
+
+    if (!strcmp(desc->name+len-2, native))
+        return 1;
+    if (!strcmp(desc->name+len-2, foreign))
+        return 0;
+    return 1;
+}
+
 int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
 {
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
@@ -184,13 +200,13 @@ int ff_draw_init(FFDrawContext *draw, enum AVPixelFormat format, unsigned flags)
         return AVERROR(EINVAL);
     if (desc->flags & ~(AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_PSEUDOPAL | AV_PIX_FMT_FLAG_ALPHA))
         return AVERROR(ENOSYS);
+    if (!is_native_endian(desc))
+        return AVERROR(ENOSYS);
     for (i = 0; i < desc->nb_components; i++) {
         c = &desc->comp[i];
         /* for now, only 8-16 bits formats */
         if (c->depth < 8 || c->depth > 16)
             return AVERROR(ENOSYS);
-        if (desc->flags & AV_PIX_FMT_FLAG_BE)
-            return AVERROR(ENOSYS);
         if (c->plane >= MAX_PLANES)
             return AVERROR(ENOSYS);
         /* strange interleaving */
@@ -259,7 +275,7 @@ void ff_draw_color(FFDrawContext *draw, FFDrawColor *color, const uint8_t rgba[4
     } else if (draw->format == AV_PIX_FMT_GRAY8 || draw->format == AV_PIX_FMT_GRAY8A) {
         color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
         color->comp[1].u8[0] = rgba[3];
-    } else if (draw->format == AV_PIX_FMT_GRAY16LE || draw->format == AV_PIX_FMT_YA16LE) {
+    } else if (draw->format == AV_PIX_FMT_GRAY16 || draw->format == AV_PIX_FMT_YA16) {
         color->comp[0].u8[0] = RGB_TO_Y_CCIR(rgba[0], rgba[1], rgba[2]);
         color->comp[0].u16[0] = color->comp[0].u8[0] << 8;
         color->comp[1].u8[0] = rgba[3];
@@ -317,11 +333,6 @@ void ff_fill_rectangle(FFDrawContext *draw, FFDrawColor *color,
             return;
         p = p0;
 
-        if (HAVE_BIGENDIAN && draw->desc->comp[0].depth > 8) {
-            for (x = 0; 2*x < draw->pixelstep[plane]; x++)
-                color_tmp.comp[plane].u16[x] = av_bswap16(color_tmp.comp[plane].u16[x]);
-        }
-
         /* copy first line from color */
         for (x = 0; x < wp; x++) {
             memcpy(p, color_tmp.comp[plane].u8, draw->pixelstep[plane]);
@@ -412,19 +423,19 @@ static void blend_line16(uint8_t *dst, unsigned src, unsigned alpha,
 
     if (left) {
         unsigned suba = (left * alpha) >> hsub;
-        uint16_t value = AV_RL16(dst);
-        AV_WL16(dst, (value * (0x10001 - suba) + src * suba) >> 16);
+        uint16_t value = AV_RN16(dst);
+        AV_WN16(dst, (value * (0x10001 - suba) + src * suba) >> 16);
         dst += dx;
     }
     for (x = 0; x < w; x++) {
-        uint16_t value = AV_RL16(dst);
-        AV_WL16(dst, (value * tau + asrc) >> 16);
+        uint16_t value = AV_RN16(dst);
+        AV_WN16(dst, (value * tau + asrc) >> 16);
         dst += dx;
     }
     if (right) {
         unsigned suba = (right * alpha) >> hsub;
-        uint16_t value = AV_RL16(dst);
-        AV_WL16(dst, (value * (0x10001 - suba) + src * suba) >> 16);
+        uint16_t value = AV_RN16(dst);
+        AV_WN16(dst, (value * (0x10001 - suba) + src * suba) >> 16);
     }
 }
 
@@ -516,7 +527,7 @@ static void blend_pixel16(uint8_t *dst, unsigned src, unsigned alpha,
     unsigned xmmod = 7 >> l2depth;
     unsigned mbits = (1 << (1 << l2depth)) - 1;
     unsigned mmult = 255 / mbits;
-    uint16_t value = AV_RL16(dst);
+    uint16_t value = AV_RN16(dst);
 
     for (y = 0; y < h; y++) {
         xm = xm0;
@@ -528,7 +539,7 @@ static void blend_pixel16(uint8_t *dst, unsigned src, unsigned alpha,
         mask += mask_linesize;
     }
     alpha = (t >> shift) * alpha;
-    AV_WL16(dst, ((0x10001 - alpha) * value + alpha * src) >> 16);
+    AV_WN16(dst, ((0x10001 - alpha) * value + alpha * src) >> 16);
 }
 
 static void blend_pixel(uint8_t *dst, unsigned src, unsigned alpha,
diff --git a/libavfilter/vf_lut.c b/libavfilter/vf_lut.c
index 5148663..4a6f6d2 100644
--- a/libavfilter/vf_lut.c
+++ b/libavfilter/vf_lut.c
@@ -115,18 +115,18 @@ static av_cold void uninit(AVFilterContext *ctx)
     AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, \
     AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, \
     AV_PIX_FMT_YUVJ440P, \
-    AV_PIX_FMT_YUV444P9LE, AV_PIX_FMT_YUV422P9LE, AV_PIX_FMT_YUV420P9LE, \
-    AV_PIX_FMT_YUV444P10LE, AV_PIX_FMT_YUV422P10LE, AV_PIX_FMT_YUV420P10LE, AV_PIX_FMT_YUV440P10LE, \
-    AV_PIX_FMT_YUV444P12LE, AV_PIX_FMT_YUV422P12LE, AV_PIX_FMT_YUV420P12LE, AV_PIX_FMT_YUV440P12LE, \
-    AV_PIX_FMT_YUV444P14LE, AV_PIX_FMT_YUV422P14LE, AV_PIX_FMT_YUV420P14LE, \
-    AV_PIX_FMT_YUV444P16LE, AV_PIX_FMT_YUV422P16LE, AV_PIX_FMT_YUV420P16LE, \
-    AV_PIX_FMT_YUVA444P16LE, AV_PIX_FMT_YUVA422P16LE, AV_PIX_FMT_YUVA420P16LE
+    AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9, \
+    AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV440P10, \
+    AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV440P12, \
+    AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14, \
+    AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV420P16, \
+    AV_PIX_FMT_YUVA444P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA420P16
 
 #define RGB_FORMATS \
     AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, \
     AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, \
     AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, \
-    AV_PIX_FMT_RGB48LE, AV_PIX_FMT_RGBA64LE
+    AV_PIX_FMT_RGB48, AV_PIX_FMT_RGBA64
 
 static const enum AVPixelFormat yuv_pix_fmts[] = { YUV_FORMATS, AV_PIX_FMT_NONE };
 static const enum AVPixelFormat rgb_pix_fmts[] = { RGB_FORMATS, AV_PIX_FMT_NONE };
@@ -226,32 +226,32 @@ static int config_props(AVFilterLink *inlink)
     case AV_PIX_FMT_YUVA420P:
     case AV_PIX_FMT_YUVA422P:
     case AV_PIX_FMT_YUVA444P:
-    case AV_PIX_FMT_YUV420P9LE:
-    case AV_PIX_FMT_YUV422P9LE:
-    case AV_PIX_FMT_YUV444P9LE:
-    case AV_PIX_FMT_YUVA420P9LE:
-    case AV_PIX_FMT_YUVA422P9LE:
-    case AV_PIX_FMT_YUVA444P9LE:
-    case AV_PIX_FMT_YUV420P10LE:
-    case AV_PIX_FMT_YUV422P10LE:
-    case AV_PIX_FMT_YUV440P10LE:
-    case AV_PIX_FMT_YUV444P10LE:
-    case AV_PIX_FMT_YUVA420P10LE:
-    case AV_PIX_FMT_YUVA422P10LE:
-    case AV_PIX_FMT_YUVA444P10LE:
-    case AV_PIX_FMT_YUV420P12LE:
-    case AV_PIX_FMT_YUV422P12LE:
-    case AV_PIX_FMT_YUV440P12LE:
-    case AV_PIX_FMT_YUV444P12LE:
-    case AV_PIX_FMT_YUV420P14LE:
-    case AV_PIX_FMT_YUV422P14LE:
-    case AV_PIX_FMT_YUV444P14LE:
-    case AV_PIX_FMT_YUV420P16LE:
-    case AV_PIX_FMT_YUV422P16LE:
-    case AV_PIX_FMT_YUV444P16LE:
-    case AV_PIX_FMT_YUVA420P16LE:
-    case AV_PIX_FMT_YUVA422P16LE:
-    case AV_PIX_FMT_YUVA444P16LE:
+    case AV_PIX_FMT_YUV420P9:
+    case AV_PIX_FMT_YUV422P9:
+    case AV_PIX_FMT_YUV444P9:
+    case AV_PIX_FMT_YUVA420P9:
+    case AV_PIX_FMT_YUVA422P9:
+    case AV_PIX_FMT_YUVA444P9:
+    case AV_PIX_FMT_YUV420P10:
+    case AV_PIX_FMT_YUV422P10:
+    case AV_PIX_FMT_YUV440P10:
+    case AV_PIX_FMT_YUV444P10:
+    case AV_PIX_FMT_YUVA420P10:
+    case AV_PIX_FMT_YUVA422P10:
+    case AV_PIX_FMT_YUVA444P10:
+    case AV_PIX_FMT_YUV420P12:
+    case AV_PIX_FMT_YUV422P12:
+    case AV_PIX_FMT_YUV440P12:
+    case AV_PIX_FMT_YUV444P12:
+    case AV_PIX_FMT_YUV420P14:
+    case AV_PIX_FMT_YUV422P14:
+    case AV_PIX_FMT_YUV444P14:
+    case AV_PIX_FMT_YUV420P16:
+    case AV_PIX_FMT_YUV422P16:
+    case AV_PIX_FMT_YUV444P16:
+    case AV_PIX_FMT_YUVA420P16:
+    case AV_PIX_FMT_YUVA422P16:
+    case AV_PIX_FMT_YUVA444P16:
         min[Y] = 16 * (1 << (desc->comp[0].depth - 8));
         min[U] = 16 * (1 << (desc->comp[1].depth - 8));
         min[V] = 16 * (1 << (desc->comp[2].depth - 8));
@@ -261,8 +261,8 @@ static int config_props(AVFilterLink *inlink)
         max[V] = 240 * (1 << (desc->comp[2].depth - 8));
         max[A] = (1 << desc->comp[3].depth) - 1;
         break;
-    case AV_PIX_FMT_RGB48LE:
-    case AV_PIX_FMT_RGBA64LE:
+    case AV_PIX_FMT_RGB48:
+    case AV_PIX_FMT_RGBA64:
         min[0] = min[1] = min[2] = min[3] = 0;
         max[0] = max[1] = max[2] = max[3] = 65535;
         break;
@@ -364,17 +364,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
             for (j = 0; j < w; j++) {
                 switch (step) {
-#if HAVE_BIGENDIAN
-                case 4: outrow[3] = av_bswap16(tab[3][av_bswap16(inrow[3])]); // Fall-through
-                case 3: outrow[2] = av_bswap16(tab[2][av_bswap16(inrow[2])]); // Fall-through
-                case 2: outrow[1] = av_bswap16(tab[1][av_bswap16(inrow[1])]); // Fall-through
-                default: outrow[0] = av_bswap16(tab[0][av_bswap16(inrow[0])]);
-#else
                 case 4: outrow[3] = tab[3][inrow[3]]; // Fall-through
                 case 3: outrow[2] = tab[2][inrow[2]]; // Fall-through
                 case 2: outrow[1] = tab[1][inrow[1]]; // Fall-through
                 default: outrow[0] = tab[0][inrow[0]];
-#endif
                 }
                 outrow += step;
                 inrow += step;
@@ -429,11 +422,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 
         for (i = 0; i < h; i++) {
             for (j = 0; j < w; j++) {
-#if HAVE_BIGENDIAN
-                outrow[j] = av_bswap16(tab[av_bswap16(inrow[j])]);
-#else
                 outrow[j] = tab[inrow[j]];
-#endif
             }
             inrow += in_linesize;
             outrow += out_linesize;
-- 
2.5.0

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel