On Tue, Oct 28, 2014 at 01:07:30AM +0530, arwa arif wrote:
[...]
> From 9c5fa6fa8f6091149570cded8ee65d232ae88e97 Mon Sep 17 00:00:00 2001
> From: Arwa Arif <arwaarif1...@gmail.com>
> Date: Sat, 25 Oct 2014 22:04:51 +0530
> Subject: [PATCH] [PATCH]lavfi: add xbr filter
>
> Makefile
>
> allfilters.c
>
> filters.texi
>
> filters.texi
>
> xbr-filter
>
> xbr-filter
>
> xbr-filter
Again, please drop these references.

> ---
>  doc/filters.texi         |   7 +
>  libavfilter/Makefile     |   1 +
>  libavfilter/allfilters.c |   1 +
>  libavfilter/vf_xbr.c     | 317 ++++++++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 326 insertions(+)
>  create mode 100644 libavfilter/vf_xbr.c
>
> diff --git a/doc/filters.texi b/doc/filters.texi
> index c70ddf3..5fa1d08 100644
> --- a/doc/filters.texi
> +++ b/doc/filters.texi
> @@ -9159,6 +9159,13 @@ Only deinterlace frames marked as interlaced.
>  Default value is @samp{all}.
>  @end table
>
> +@section xbr
> +
> +A high-quality magnification filter which is designed for pixel art. It follows a set
> +of edge-detection rules @url{http://www.libretro.com/forums/viewtopic.php?f=6&t=134}.
> +This filter was originally created by Hyllian. The current implementation scales the
> +image by scale factor 2.

image -> input

> +
>  @anchor{yadif}
>  @section yadif
>
> diff --git a/libavfilter/Makefile b/libavfilter/Makefile
> index 6d868e7..2c56e38 100644
> --- a/libavfilter/Makefile
> +++ b/libavfilter/Makefile
> @@ -198,6 +198,7 @@ OBJS-$(CONFIG_VIDSTABDETECT_FILTER)          += vidstabutils.o vf_vidstabdetect.o
>  OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER)       += vidstabutils.o vf_vidstabtransform.o
>  OBJS-$(CONFIG_VIGNETTE_FILTER)               += vf_vignette.o
>  OBJS-$(CONFIG_W3FDIF_FILTER)                 += vf_w3fdif.o
> +OBJS-$(CONFIG_XBR_FILTER)                    += vf_xbr.o
>  OBJS-$(CONFIG_YADIF_FILTER)                  += vf_yadif.o
>  OBJS-$(CONFIG_ZMQ_FILTER)                    += f_zmq.o
>  OBJS-$(CONFIG_ZOOMPAN_FILTER)                += vf_zoompan.o
> diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
> index d88a9ad..2352d44 100644
> --- a/libavfilter/allfilters.c
> +++ b/libavfilter/allfilters.c
> @@ -213,6 +213,7 @@ void avfilter_register_all(void)
>      REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
>      REGISTER_FILTER(VIGNETTE,       vignette,       vf);
>      REGISTER_FILTER(W3FDIF,         w3fdif,         vf);
> +    REGISTER_FILTER(XBR,            xbr,            vf);
>      REGISTER_FILTER(YADIF,          yadif,          vf);
>      REGISTER_FILTER(ZMQ,            zmq,            vf);
>      REGISTER_FILTER(ZOOMPAN,        zoompan,        vf);
> diff --git a/libavfilter/vf_xbr.c b/libavfilter/vf_xbr.c
> new file mode 100644
> index 0000000..5c97173
> --- /dev/null
> +++ b/libavfilter/vf_xbr.c
> @@ -0,0 +1,317 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * Copyright (c) 2014 Arwa Arif <arwaarif1...@gmail.com>
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +/**
> + * @file
> + * XBR Filter is used for depixelization of image.
> + * This is based on Hyllian's 2xBR shader.
> + * 2xBR Filter v0.2.5

trailing whitespace

> + * Reference : http://board.byuu.org/viewtopic.php?f=10&t=2248

dead link, and replace "Reference :" by "@see http://..."

Also, add a TODO with the following:
  - x3 and x4 scale
  - threading
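Put together, the header block could end up looking something like this (only a sketch: I reused the libretro URL from your texi entry since the byuu one is dead, reword as you see fit):

/**
 * @file
 * XBR Filter is used for depixelization of image.
 * This is based on Hyllian's xBR shader.
 *
 * @see http://www.libretro.com/forums/viewtopic.php?f=6&t=134
 *
 * @todo add x3 and x4 scale
 * @todo add threading
 */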
> + */
> +
> +#include "libavutil/opt.h"
> +#include "libavutil/avassert.h"
> +#include "libavutil/pixdesc.h"
> +#include "internal.h"
> +
> +typedef struct {
> +    uint32_t rgbtoyuv[1<<24];
> +} xBRContext;
> +
> +/**
> +* Calculates the weight of difference of the pixels, by transforming these
> +* pixels into their Y'UV parts. It then uses the threshold used by HQx filters:
> +* 48*Y + 7*U + 6*V, to give it those smooth looking edges.
> +**/
> +static int d(AVFrame *in,int x1,int y1,int x2,int y2,const uint32_t *r2y){

Here and several times below, wrong coding style.

> +
> +#define YMASK 0xff0000
> +#define UMASK 0x00ff00
> +#define VMASK 0x0000ff
> +
> +    int r1 = *(in->data[0] + y1 * in->linesize[0] + x1*3);
> +    int g1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 1);
> +    int b1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 2);
> +
> +    int r2 = *(in->data[0] + y2 * in->linesize[0] + x2*3);
> +    int g2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 1);
> +    int b2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 2);
> +
> +    uint32_t c1 = (r1 | g1<<8 | b1<<16);
> +    uint32_t c2 = (r2 | g2<<8 | b2<<16);

pointless ( )

> +
> +    uint32_t yuv1 = r2y[c1 & 0xffffff];
> +    uint32_t yuv2 = r2y[c2 & 0xffffff];
> +
> +    return abs((yuv1 & YMASK) - (yuv2 & YMASK)) > (48 << 16) ||
> +           abs((yuv1 & UMASK) - (yuv2 & UMASK)) > ( 7 <<  8) ||
> +           abs((yuv1 & VMASK) - (yuv2 & VMASK)) > ( 6 <<  0);
> +}
> +
> +/**
> +* Mixes a pixel A, with pixel B, with B's transperancy set to 'a'
> +* In other words, A is a solid color (bottom) and B is a transparent color (top)
> +**/
> +static int mix(AVFrame *in,int x1,int y1,int x2,int y2,int a,int color){
> +
> +    int col1,col2;
> +    col1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + color);
> +    col2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + color);
> +
> +    return (a*col2 + (2-a)*col1)/2;
> +};
> +
> +/**
> +* Fills the output matrix
> +**/
> +static void fill(AVFrame *in,AVFrame *out,int u,int v,int x,int y,int mode,int new_x,int new_y){
> +
> +    int r,g,b;
> +    /*mix colors if they are not on boundary*/
> +    if(mode!=0 && u>=0 && v>=0 && u<in->width && v<in->height){
> +
> +        r = mix(in,u,v,x,y,1,0);
> +        g = mix(in,u,v,x,y,1,1);
> +        b = mix(in,u,v,x,y,1,2);

Here and several times below, you have tabs.

> +
> +    } else{
> +
> +        r = *(in->data[0] + y*in->linesize[0] + x*3);
> +        g = *(in->data[0] + y*in->linesize[0] + x*3 + 1);
> +        b = *(in->data[0] + y*in->linesize[0] + x*3 + 2);
> +    }
> +
> +    /*Insert blended color into scaledImageData*/
> +    *(out->data[0] + (new_y)*out->linesize[0] + (new_x)*3)     = r;
> +    *(out->data[0] + (new_y)*out->linesize[0] + (new_x)*3 + 1) = g;
> +    *(out->data[0] + (new_y)*out->linesize[0] + (new_x)*3 + 2) = b;
> +
> +    return;
> +}
> +

Trailing whitespaces here and several times below.
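To make the style nits concrete, here is how mix() could look once reformatted (4-space indent instead of tabs, spaces after commas, opening brace of the function on its own line, and no stray ';' after the closing brace); untested, but it is meant to behave exactly like your version:

static int mix(AVFrame *in, int x1, int y1, int x2, int y2, int a, int color)
{
    /* same arithmetic as before: blend the two components, B weighted by a/2 */
    int col1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + color);
    int col2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + color);

    return (a*col2 + (2 - a)*col1) / 2;
}

The same treatment applies to d(), fill() and apply_edge_detection_rules().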
> +/**
> +* Applies the xBR filter rules.
> +**/
> +static void apply_edge_detection_rules(AVFrame *in,AVFrame *out,int x,int y,const uint32_t *r2y){
> +
> +    /* Matrix: (10 is 0,0 i.e: current pixel)
> +       -2 | -1|  0| +1| +2    (x)
> +    ______________________________
> +    -2 |      [A1][B1][C1]
> +    -1 |  [A0][ A][ B][ C][C4]
> +     0 |  [D0][ D][ E][ F][F4]
> +    +1 |  [G0][ G][ H][ I][I4]
> +    +2 |      [G5][H5][I5]
> +              |(y)|
> +    */
> +
> +    /*Cached Pixel Weight Difference*/
> +    int d_E_D   = d(in, x, y, x-1, y, r2y);
> +    int d_E_B   = d(in, x, y, x, y-1, r2y);
> +    int d_E_F   = d(in, x, y, x+1, y, r2y);
> +    int d_E_H   = d(in, x, y, x, y+1, r2y);
> +    int d_E_G   = d(in, x, y, x-1, y+1, r2y);
> +    int d_E_C   = d(in, x, y, x+1, y-1, r2y);
> +    int d_A_D0  = d(in, x-1, y-1, x-2, y, r2y);
> +    int d_A_B1  = d(in, x-1, y-1, x, y-2, r2y);
> +    int d_D_B   = d(in, x-1, y, x, y-1, r2y);
> +    int d_D_H   = d(in, x-1, y, x, y+1, r2y);
> +    int d_D_A0  = d(in, x-1, y, x-2, y-1, r2y);
> +    int d_B_F   = d(in, x, y-1, x+1, y, r2y);
> +    int d_B_A1  = d(in, x, y-1, x-1, y-2, r2y);
> +    int d_E_A   = d(in, x, y, x-1, y-1, r2y);
> +    int d_E_I   = d(in, x, y, x+1, y+1, r2y);
> +    int d_C_F4  = d(in, x+1, y-1, x+2, y, r2y);
> +    int d_C_B1  = d(in, x+1, y-1, x, y-2, r2y);
> +    int d_F_H   = d(in, x+1, y, x, y+1, r2y);
> +    int d_F_C4  = d(in, x+1, y, x+2, y-1, r2y);
> +    int d_B_C1  = d(in, x, y-1, x+1, y-2, r2y);
> +    int d_G_D0  = d(in, x-1, y+1, x-2, y, r2y);
> +    int d_G_H5  = d(in, x-1, y+1, x, y+2, r2y);
> +    int d_H_G5  = d(in, x, y+1, x-1, y+2, r2y);
> +    int d_D_G0  = d(in, x-1, y, x-2, y+1, r2y);
> +    int d_I_F4  = d(in, x+1, y+1, x+2, y, r2y);
> +    int d_I_H5  = d(in, x+1, y+1, x, y+2, r2y);
> +    int d_H_I5  = d(in, x, y+1, x+1, y+2, r2y);
> +    int d_H_I4  = d(in, x, y+1, x+2, y+1, r2y);
> +
> +    /**
> +    * Note: On reading edge detection rules
> +    *
> +    * Each edge rule is an if..else statement, everytime on else, the
> +    * current pixel color pointed to by matrix[0] is used to color it's edge.
> +    *
> +    * Each if statement checks wether the sum of weight difference on the left is
> +    * lesser than that of the right weight differece.
> +    */
> +
> +    /**
> +    * Top Left Edge Detection Rule
> +    **/
> +    if ((d_E_G+d_E_C+d_A_D0+d_A_B1+(4*d_D_B)) < (d_D_H+d_D_A0+d_B_F+d_B_A1+(4*d_E_A))){
> +        // Figure what color to blend with current pixel -->10
> +        if(d_E_D <= d_E_B)
> +            fill(in,out,x-1,y,x,y,1,x*2,y*2);
> +        else
> +            fill(in,out,x,y-1,x,y,1,x*2,y*2);
> +    } else{
> +        /*Insert current pixel color into scaledImageData*/
> +        fill(in,out,x,y,x,y,0,x*2,y*2);
> +    }

You can probably isolate this in a function or a macro somehow, but well...
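If you do go for a macro, something along these lines might work (rough, untested sketch; the name APPLY_RULE is arbitrary, it assumes in, out, x and y are in scope, and only the top-left corner is shown here — the other three corners would just pass their own weight sums, candidate pixels and output coordinates):

#define APPLY_RULE(wl, wr, d1, d2, x1, y1, x2, y2, nx, ny) do {        \
    if ((wl) < (wr)) {                                                 \
        /* blend with whichever neighbour is closer in colour */       \
        if ((d1) <= (d2))                                              \
            fill(in, out, (x1), (y1), x, y, 1, (nx), (ny));            \
        else                                                           \
            fill(in, out, (x2), (y2), x, y, 1, (nx), (ny));            \
    } else {                                                           \
        /* keep the current pixel colour */                            \
        fill(in, out, x, y, x, y, 0, (nx), (ny));                      \
    }                                                                  \
} while (0)

    /* top-left corner of the 2x2 output block */
    APPLY_RULE(d_E_G + d_E_C + d_A_D0 + d_A_B1 + 4*d_D_B,
               d_D_H + d_D_A0 + d_B_F + d_B_A1 + 4*d_E_A,
               d_E_D, d_E_B,
               x-1, y, x, y-1,
               x*2, y*2);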
[...]

Can you add a FATE test similar to this? See tests/fate/filter-video.mak.

What reference code did you use to check if your code generated the same bitmaps?

-- 
Clément B.