I think the segmentation fault may be caused by a wrong input format; the
code currently handles rgb24 only. I have attached the updated patch along
with the input file I tested. I was not able to attach a video file, so for
now I am attaching an image instead.
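That said, the backtrace shows d() being called with x1=-1 and y2=577, i.e.
coordinates outside the frame, so the edge-detection rules may also need a
border guard regardless of the pixel format. A minimal sketch of what I have
in mind, not part of the attached patch (the helper name pixel_at is only
illustrative; av_clip comes from libavutil/common.h):

/* Clamp the sampling coordinates to the frame before reading the packed RGB
 * plane, so the rules can probe x-2..x+2 and y-2..y+2 near the borders
 * without reading outside in->data[0]. */
static const uint8_t *pixel_at(const AVFrame *in, int x, int y)
{
    x = av_clip(x, 0, in->width  - 1);
    y = av_clip(y, 0, in->height - 1);
    return in->data[0] + y * in->linesize[0] + x * 3;
}

d() and mix() could then read r/g/b through this helper instead of indexing
in->data[0] directly.
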
On Sun, Oct 26, 2014 at 4:13 AM, Michael Niedermayer <michae...@gmx.at>
wrote:

> On Sat, Oct 25, 2014 at 11:32:43PM +0530, arwa arif wrote:
> > Can you please specify what is meant by stand-alone changes? Do I need to
> > add the non-default functions in a different commit? I am not sure I
> > understood it right. Apart from that, I have updated the patch.
> >
> > On Sat, Oct 25, 2014 at 10:16 PM, Nicolas George <geo...@nsup.org> wrote:
> >
> > > On quartidi 4 Brumaire, year CCXXIII, arwa arif wrote:
> > > > > please post a new patch instead of a patch on top of a previous
> > > > > patch
> > > >  libavfilter/vf_xbr.c |  303 ++++++++++++++++++++++++++++++++++++++++++++++++++
> > > >  1 file changed, 303 insertions(+)
> > > >  create mode 100644 libavfilter/vf_xbr.c
> > >
> > > This patch does not contain the changes to Makefile and allfilters.c, so
> > > I believe you still have a bit of tweaking to do with Git.
> > >
> > > If you used a branch (which is widely advisable), you should be able to
> > > type this:
> > >
> > > git log --stat master..
> > >
> > > Then you should see a single commit, yours, with changes to Makefile,
> > > allfilters.c and all the other common files you needed to change, and of
> > > course the new file(s).
> > >
> > > (If you have stand-alone changes, such as moving code into a shared
> > > function to use in your actual patch, they should go in a separate
> > > commit. But that does not seem to apply here.)
> > >
> > > Also, I suspect you forgot to add the documentation for the filter in
> > > doc/filters.texi. A few words are enough, but at the very least let people
> > > know what it does, because xbr looks like just three random letters -- not
> > > your fault, of course.
> > >
> > > Regards,
> > >
> > > --
> > >   Nicolas George
> > >
> > > _______________________________________________
> > > ffmpeg-devel mailing list
> > > ffmpeg-devel@ffmpeg.org
> > > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> > >
> > >
>
> >  doc/filters.texi         |    7 +
> >  libavfilter/Makefile     |    1
> >  libavfilter/allfilters.c |    1
> >  libavfilter/vf_xbr.c     |  303 +++++++++++++++++++++++++++++++++++++++++++++++
> >  4 files changed, 312 insertions(+)
> > a92794344ddf0d5d4ea3205091ee4a19699c5c06 0001-PATCH-lavfi-add-xbr-filter.patch
> > From cf3a7a6bcb9735c6f6157f80da208c1c191b3e02 Mon Sep 17 00:00:00 2001
> > From: Arwa Arif <arwaarif1...@gmail.com>
> > Date: Sat, 25 Oct 2014 22:04:51 +0530
> > Subject: [PATCH] [PATCH]lavfi: add xbr filter
> >
> > Makefile
> >
> > allfilters.c
> >
> > filters.texi
> > ---
> >  doc/filters.texi         |    7 ++
> >  libavfilter/Makefile     |    1 +
> >  libavfilter/allfilters.c |    1 +
> >  libavfilter/vf_xbr.c     |  303 ++++++++++++++++++++++++++++++++++++++++++++++
> >  4 files changed, 312 insertions(+)
> >  create mode 100644 libavfilter/vf_xbr.c
>
> I tried the filter but it segfaults
> ./ffplay matrixbench_mpeg2.mpg -vf xbr
>
> Program received signal SIGSEGV, Segmentation fault.
> [Switching to Thread 0x7fffcfff7700 (LWP 14880)]
> 0x00000000004b40c5 in d (in=0x7fffc85eb000, x1=-1, y1=576, x2=0, y2=577)
> at libavfilter/vf_xbr.c:49
> 49          int r2 = *(in->data[0] + y2 * in->linesize[0] + x2*3);
> (gdb) bt
> #0  0x00000000004b40c5 in d (in=0x7fffc85eb000, x1=-1, y1=576, x2=0,
> y2=577) at libavfilter/vf_xbr.c:49
> #1  0x00000000004b49db in apply_edge_detection_rules (in=0x7fffc85eb000,
> out=0x7fffc85ead20, x=0, y=575) at libavfilter/vf_xbr.c:159
> #2  0x00000000004b5173 in filter_frame (inlink=0x7fffc85a7800,
> in=0x7fffc85eb000) at libavfilter/vf_xbr.c:270
> #3  0x0000000000444270 in ff_filter_frame_framed (link=0x7fffc85a7800,
> frame=0x7fffc85eb000) at libavfilter/avfilter.c:1103
> #4  0x00000000004447c4 in ff_filter_frame (link=0x7fffc85a7800,
> frame=0x7fffc85eb000) at libavfilter/avfilter.c:1183
> #5  0x00000000004a4aff in filter_frame (link=0x7fffc85a70a0, in=0x0) at
> libavfilter/vf_scale.c:530
> #6  0x0000000000444270 in ff_filter_frame_framed (link=0x7fffc85a70a0,
> frame=0x7fffc85ead20) at libavfilter/avfilter.c:1103
> #7  0x00000000004447c4 in ff_filter_frame (link=0x7fffc85a70a0,
> frame=0x7fffc85ead20) at libavfilter/avfilter.c:1183
> #8  0x000000000044b1e3 in request_frame (link=0x7fffc85a70a0) at
> libavfilter/buffersrc.c:499
> #9  0x000000000044212b in ff_request_frame (link=0x7fffc85a70a0) at
> libavfilter/avfilter.c:351
> #10 0x0000000000442159 in ff_request_frame (link=0x7fffc85a7800) at
> libavfilter/avfilter.c:353
> #11 0x0000000000442159 in ff_request_frame (link=0x7fffc85a71e0) at
> libavfilter/avfilter.c:353
> #12 0x0000000000442159 in ff_request_frame (link=0x7fffc85a7680) at
> libavfilter/avfilter.c:353
> #13 0x0000000000442159 in ff_request_frame (link=0x7fffc85a6c00) at
> libavfilter/avfilter.c:353
> #14 0x0000000000448f1e in av_buffersink_get_frame_flags
> (ctx=0x7fffc85a6220, frame=0x7fffc80008c0, flags=0) at
> libavfilter/buffersink.c:137
> #15 0x000000000041f526 in video_thread (arg=0x7fffeb1a7040) at
> ffplay.c:2168
> #16 0x00007ffff64a6fd5 in ?? () from
> /usr/lib/x86_64-linux-gnu/libSDL-1.2.so.0
> #17 0x00007ffff64ea999 in ?? () from
> /usr/lib/x86_64-linux-gnu/libSDL-1.2.so.0
> #18 0x00007ffff006ae9a in start_thread (arg=0x7fffcfff7700) at
> pthread_create.c:308
> #19 0x00007fffefd9831d in clone () at
> ../sysdeps/unix/sysv/linux/x86_64/clone.S:112
> #20 0x0000000000000000 in ?? ()
>
>
> [...]
>
> > +const int THRESHHOLD_Y = 48;
> > +const int THRESHHOLD_U = 7;
> > +const int THRESHHOLD_V = 6;
>
> these should be #define
>
>
> > +
> > +/**
> > > +* Calculates the weight of difference of the pixels, by transforming these
> > > +* pixels into their Y'UV parts. It then uses the threshold used by HQx filters:
> > > +* 48*Y + 7*U + 6*V, to give it those smooth looking edges.
> > +**/
> > +static int d(AVFrame *in,int x1,int y1,int x2,int y2){
> > +
> > +    int r1 = *(in->data[0] + y1 * in->linesize[0] + x1*3);
> > +    int g1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 1);
> > +    int b1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 2);
> > +
> > +    int r2 = *(in->data[0] + y2 * in->linesize[0] + x2*3);
> > +    int g2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 1);
> > +    int b2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 2);
> > +
> > +    int r = abs(r1 - r2);
> > +    int g = abs(g1 - g2);
> > +    int b = abs(b1 - b2);
> > +
> > +    /*Convert RGB to Y'UV*/
> > +    int y = ( (  66 * r + 129 * g +  25 * b + 128) >> 8) +  16;
> > +    int u = ( ( -38 * r -  74 * g + 112 * b + 128) >> 8) + 128;
> > +    int v = ( ( 112 * r -  94 * g -  18 * b + 128) >> 8) + 128;
> > +
> > +    /*Add HQx filters threshold & return*/
> > +    return (y * THRESHHOLD_Y) + (u* THRESHHOLD_U) + (v* THRESHHOLD_V);
>
> The position of the abs() looks strange:
> if this is supposed to be a vector distance in YUV space, then the
> abs() needs to be on the YUV terms.
>
>
> [...]
>
> > +    av_frame_copy_props(out, in);
> > +    for(i=0;i<inlink->w;i++)
> > +     for(j=0;j<inlink->h;j++)
> > +             apply_edge_detection_rules(in,out,i,j);
>
> tabs and trailing whitespace aren't allowed in ffmpeg git
>
> [...]
>
> --
> Michael     GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB
>
> Everything should be made as simple as possible, but not simpler.
> -- Albert Einstein
>
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
>
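The updated patch is attached below. One note on the #define comment: in this
version the thresholds are folded directly into the weight formula as the
literals 48, 7 and 6. If named constants are preferred, a small sketch of how
d() could look instead (the names are only illustrative):

#define XBR_THRESHOLD_Y 48
#define XBR_THRESHOLD_U  7
#define XBR_THRESHOLD_V  6

    /* in d(), instead of the literal constants: */
    return (y * XBR_THRESHOLD_Y) + (u * XBR_THRESHOLD_U) + (v * XBR_THRESHOLD_V);
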
From bbf4a8982bda6d9c1818b3d645869270cfbe049b Mon Sep 17 00:00:00 2001
From: Arwa Arif <arwaarif1...@gmail.com>
Date: Sat, 25 Oct 2014 22:04:51 +0530
Subject: [PATCH] lavfi: add xbr filter

Makefile

allfilters.c

filters.texi

xbr-filter
---
 doc/filters.texi         |    7 ++
 libavfilter/Makefile     |    1 +
 libavfilter/allfilters.c |    1 +
 libavfilter/vf_xbr.c     |  299 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 308 insertions(+)
 create mode 100644 libavfilter/vf_xbr.c

diff --git a/doc/filters.texi b/doc/filters.texi
index c70ddf3..5fa1d08 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -9159,6 +9159,13 @@ Only deinterlace frames marked as interlaced.
 Default value is @samp{all}.
 @end table
 
+@section xbr
+
+A high-quality magnification filter designed for pixel art. It follows a set of
+edge-detection rules; see @url{http://www.libretro.com/forums/viewtopic.php?f=6&t=134}.
+The filter was originally created by Hyllian. The current implementation scales the
+image by a factor of 2.
+
 @anchor{yadif}
 @section yadif
 
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 6d868e7..2c56e38 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -198,6 +198,7 @@ OBJS-$(CONFIG_VIDSTABDETECT_FILTER)          += vidstabutils.o vf_vidstabdetect.
 OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER)       += vidstabutils.o vf_vidstabtransform.o
 OBJS-$(CONFIG_VIGNETTE_FILTER)               += vf_vignette.o
 OBJS-$(CONFIG_W3FDIF_FILTER)                 += vf_w3fdif.o
+OBJS-$(CONFIG_XBR_FILTER)                    += vf_xbr.o
 OBJS-$(CONFIG_YADIF_FILTER)                  += vf_yadif.o
 OBJS-$(CONFIG_ZMQ_FILTER)                    += f_zmq.o
 OBJS-$(CONFIG_ZOOMPAN_FILTER)                += vf_zoompan.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index d88a9ad..2352d44 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -213,6 +213,7 @@ void avfilter_register_all(void)
     REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
     REGISTER_FILTER(VIGNETTE,       vignette,       vf);
     REGISTER_FILTER(W3FDIF,         w3fdif,         vf);
+    REGISTER_FILTER(XBR,            xbr,            vf);
     REGISTER_FILTER(YADIF,          yadif,          vf);
     REGISTER_FILTER(ZMQ,            zmq,            vf);
     REGISTER_FILTER(ZOOMPAN,        zoompan,        vf);
diff --git a/libavfilter/vf_xbr.c b/libavfilter/vf_xbr.c
new file mode 100644
index 0000000..e947491
--- /dev/null
+++ b/libavfilter/vf_xbr.c
@@ -0,0 +1,299 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * Copyright (c) 2014 Arwa Arif <arwaarif1...@gmail.com>
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * The xBR filter is used for depixelization of images.
+ * It is based on Hyllian's 2xBR shader.
+ * 2xBR filter v0.2.5
+ * Reference: http://board.byuu.org/viewtopic.php?f=10&t=2248
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+/**
+* Calculates the weight of the difference between two pixels by transforming
+* them into their Y'UV components. It then applies the threshold used by the
+* HQx filters, 48*Y + 7*U + 6*V, to give smooth-looking edges.
+**/
+static int d(AVFrame *in,int x1,int y1,int x2,int y2){
+
+    int r1 = *(in->data[0] + y1 * in->linesize[0] + x1*3);
+    int g1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 1);
+    int b1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 2);
+
+    int r2 = *(in->data[0] + y2 * in->linesize[0] + x2*3);
+    int g2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 1);
+    int b2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 2);
+
+    int r = (r1 - r2);
+    int g = (g1 - g2);
+    int b = (b1 - b2);
+
+    /*Convert RGB to Y'UV*/
+    int y = abs( ( (  66 * r + 129 * g +  25 * b + 128) >> 8) +  16);
+    int u = abs( ( ( -38 * r -  74 * g + 112 * b + 128) >> 8) + 128);
+    int v = abs( ( ( 112 * r -  94 * g -  18 * b + 128) >> 8) + 128);
+
+    /*Add HQx filters threshold & return*/
+    return (y*48) + (u*7) + (v*6);
+}
+
+/**
+* Mixes pixel A with pixel B, with B's transparency set to 'a'.
+* In other words, A is a solid color (bottom) and B is a transparent color (top).
+**/
+static int mix(AVFrame *in,int x1,int y1,int x2,int y2,int a,int mode){
+    /*If red color*/
+    int col1,col2;
+    if(mode==0){
+        col1 = *(in->data[0] + y1 * in->linesize[0] + x1*3);
+        col2 = *(in->data[0] + y2 * in->linesize[0] + x2*3);
+    }
+
+    /*If green color*/
+    else if(mode==1){
+        col1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 1);
+        col2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 1);
+    }
+
+    /*If blue color*/
+    else{
+        col1 = *(in->data[0] + y1 * in->linesize[0] + x1*3 + 2);
+        col2 = *(in->data[0] + y2 * in->linesize[0] + x2*3 + 2);
+    }
+
+    return (a*col2 + (2-a)*col1)/2;
+}
+
+/**
+* Fills the output matrix
+**/
+static void fill(AVFrame *in,AVFrame *out,int u,int v,int x,int y,int mode,int new_x,int new_y){
+
+    int r,g,b;
+    /*mix colors*/
+    if(mode==0){
+
+        r = *(in->data[0] + y*in->linesize[0] + x*3);
+        g = *(in->data[0] + y*in->linesize[0] + x*3 + 1);
+        b = *(in->data[0] + y*in->linesize[0] + x*3 + 2);
+
+    } else{
+
+        r = mix(in,u,v,x,y,1,0);
+        g = mix(in,u,v,x,y,1,1);
+        b = mix(in,u,v,x,y,1,2);
+    }
+
+    /*Insert blended color into scaledImageData*/
+    *(out->data[0] + (new_y)*out->linesize[0] + (new_x)*3) = r;
+    *(out->data[0] + (new_y)*out->linesize[0] + (new_x)*3 + 1) = g;
+    *(out->data[0] + (new_y)*out->linesize[0] + (new_x)*3 + 2) = b;
+
+    return;
+}
+
+/**
+* Applies the xBR filter rules.
+**/
+static void apply_edge_detection_rules(AVFrame *in,AVFrame *out,int x,int y){
+
+    /* Matrix: (10 is 0,0 i.e: current pixel)
+    -2 | -1| 0| +1| +2 (x)
+    ______________________________
+    -2 | [ 0][ 1][ 2]
+    -1 | [ 3][ 4][ 5][ 6][ 7]
+     0 | [ 8][ 9][10][11][12]
+    +1 | [13][14][15][16][17]
+    +2 | [18][19][20]
+    |(y)|
+    */
+
+    /*Cached Pixel Weight Difference*/
+    int d_10_9   =  d(in,  x,    y,    x-1,  y);
+    int d_10_5   =  d(in,  x,    y,    x,    y-1);
+    int d_10_11  =  d(in,  x,    y,    x+1,  y);
+    int d_10_15  =  d(in,  x,    y,    x,    y+1);
+    int d_10_14  =  d(in,  x,    y,    x-1,  y+1);
+    int d_10_6   =  d(in,  x,    y,    x+1,  y-1);
+    int d_4_8    =  d(in,  x-1,  y-1,  x-2,  y);
+    int d_4_1    =  d(in,  x-1,  y-1,  x,    y-2);
+    int d_9_5    =  d(in,  x-1,  y,    x,    y-1);
+    int d_9_15   =  d(in,  x-1,  y,    x,    y+1);
+    int d_9_3    =  d(in,  x-1,  y,    x-2,  y-1);
+    int d_5_11   =  d(in,  x,    y-1,  x+1,  y);
+    int d_5_0    =  d(in,  x,    y-1,  x-1,  y-2);
+    int d_10_4   =  d(in,  x,    y,    x-1,  y-1);
+    int d_10_16  =  d(in,  x,    y,    x+1,  y+1);
+    int d_6_12   =  d(in,  x+1,  y-1,  x+2,  y);
+    int d_6_1    =  d(in,  x+1,  y-1,  x,    y-2);
+    int d_11_15  =  d(in,  x+1,  y,    x,    y+1);
+    int d_11_7   =  d(in,  x+1,  y,    x+2,  y-1);
+    int d_5_2    =  d(in,  x,    y-1,  x+1,  y-2);
+    int d_14_8   =  d(in,  x-1,  y+1,  x-2,  y);
+    int d_14_19  =  d(in,  x-1,  y+1,  x,    y+2);
+    int d_15_18  =  d(in,  x,    y+1,  x-1,  y+2);
+    int d_9_13   =  d(in,  x-1,  y,    x-2,  y+1);
+    int d_16_12  =  d(in,  x+1,  y+1,  x+2,  y);
+    int d_16_19  =  d(in,  x+1,  y+1,  x,    y+2);
+    int d_15_20  =  d(in,  x,    y+1,  x+1,  y+2);
+    int d_15_17  =  d(in,  x,    y+1,  x+2,  y+1);
+
+    /**
+    * Note: On reading edge detection rules
+    *
+    * Each edge rule is an if..else statement; every time the else branch is
+    * taken, the current pixel's color (pixel 10 in the matrix) is used for its edge.
+    *
+    * Each if statement checks whether the sum of weight differences on the
+    * left is less than the sum of weight differences on the right.
+    */
+
+    /**
+    * Top Left Edge Detection Rule
+    **/
+    if ((d_10_14+d_10_6+d_4_8+d_4_1+(4*d_9_5)) < (d_9_15+d_9_3+d_5_11+d_5_0+(4*d_10_4))){
+        // Figure what color to blend with current pixel --> 10
+        if(d_10_9 <= d_10_5)
+            fill(in,out,x-1,y,x,y,1,x*2,y*2);
+        else
+            fill(in,out,x,y-1,x,y,1,x*2,y*2);
+    } else{
+        /*Insert current pixel color into scaledImageData*/
+        fill(in,out,x,y,x,y,0,x*2,y*2);
+    }
+    /**
+    * Top Right Edge Detection Rule
+    **/
+    if ((d_10_16+d_10_4+d_6_12+d_6_1+(4*d_5_11)) < (d_11_15+d_11_7+d_9_5+d_5_2+(4*d_10_6))){
+        // Figure what color to blend with current pixel --> 10
+        if(d_10_5 <= d_10_11)
+            fill(in,out,x,y-1,x,y,1,(x*2)+1,y*2);
+        else
+            fill(in,out,x+1,y,x,y,1,(x*2)+1,y*2);
+    } else{
+        /*Insert current pixel color into scaledImageData*/
+        fill(in,out,x,y,x,y,0,(x*2)+1,y*2);
+    }
+
+    /**
+    * Bottom Left Edge Detection Rule
+    **/
+    if ((d_10_4+d_10_16+d_14_8+d_14_19+(4*d_9_15)) < (d_9_5+d_9_13+d_11_15+d_15_18+(4*d_10_14))){
+        // Figure what color to blend with current pixel --> 10
+        if(d_10_9 <= d_10_15)
+            fill(in,out,x-1,y,x,y,1,x*2,(y*2)+1);
+        else
+            fill(in,out,x,y+1,x,y,1,x*2,(y*2)+1);
+
+    } else{
+        /*Insert current pixel color into scaledImageData*/
+        fill(in,out,x,y,x,y,0,x*2,(y*2)+1);
+    }
+
+    /**
+    * Bottom Right Edge Detection Rule
+    **/
+    if ((d_10_6+d_10_14+d_16_12+d_16_19+(4*d_11_15)) < (d_9_15+d_15_20+d_15_17+d_5_11+(4*d_10_16))){
+        // Figure what color to blend with current pixel --> 10
+        if(d_10_11 <= d_10_15)
+            fill(in,out,x+1,y,x,y,1,(x*2)+1,(y*2)+1);
+        else
+            fill(in,out,x,y+1,x,y,1,(x*2)+1,(y*2)+1);
+
+    } else{
+        /*Insert current pixel color into scaledImageData*/
+        fill(in,out,x,y,x,y,0,(x*2)+1,(y*2)+1);
+    }
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+    AVFilterLink *inlink = ctx->inputs[0];
+
+    outlink->w = inlink->w * 2;
+    outlink->h = inlink->h * 2;
+    return 0;
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+    static const enum AVPixelFormat pix_fmts[] = {
+        AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE,
+    };
+
+    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+    return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    int i,j;
+
+    AVFilterContext *ctx = inlink->dst;
+    AVFilterLink *outlink = ctx->outputs[0];
+    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+    if (!out) {
+        av_frame_free(&in);
+        return AVERROR(ENOMEM);
+    }
+
+    av_frame_copy_props(out, in);
+    for(i=0;i<inlink->w;i++)
+        for(j=0;j<inlink->h;j++)
+            apply_edge_detection_rules(in,out,i,j);
+
+    out->width  = outlink->w;
+    out->height = outlink->h;
+
+    av_frame_free(&in);
+    return ff_filter_frame(outlink, out);
+}
+
+static const AVFilterPad xbr_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = filter_frame,
+    },
+    { NULL }
+};
+
+static const AVFilterPad xbr_outputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_output,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_xbr = {
+    .name          = "xbr",
+    .description   = NULL_IF_CONFIG_SMALL("Scale the input by 2 using the xBR algorithm."),
+    .inputs        = xbr_inputs,
+    .outputs       = xbr_outputs,
+    .query_formats = query_formats,
+};
-- 
1.7.9.5

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
