On Thu, Apr 30, 2015 at 09:49:27AM +0200, Clément Bœsch wrote:
> On Wed, Apr 29, 2015 at 06:00:54PM +0200, Michael Niedermayer wrote:
> > Signed-off-by: Michael Niedermayer <michae...@gmx.at>
[...]
> > +    c = (n*oh_sum_v - o_sum_v*(int64_t)h_sum_v) /
> > +        (sqrt(o_sigma)*sqrt(h_sigma));
>
> not using sqrt(o_sigma * h_sigma) for precision or overflow concerns?

overflow
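For illustration, a minimal standalone sketch of that overflow concern (the
magnitudes and names below are made up for the example, not taken from the
patch):

    #include <inttypes.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* plausible magnitudes: sums of squared 8-bit deviations over a
         * large object can reach roughly 255^2 * width * height */
        int64_t o_sigma = INT64_C(255) * 255 * 1920 * 1080;   /* ~1.3e11 */
        int64_t h_sigma = INT64_C(255) * 255 * 1920 * 1080;

        /* o_sigma * h_sigma would be ~1.8e22, far above INT64_MAX (~9.2e18),
         * so sqrt(o_sigma * h_sigma) evaluated in int64_t arithmetic
         * overflows before sqrt() ever sees the value */
        printf("product in double: %e  INT64_MAX: %e\n",
               (double)o_sigma * (double)h_sigma, (double)INT64_MAX);

        /* taking the roots separately keeps everything in double range,
         * at the cost of one extra sqrt() call */
        printf("sqrt(o_sigma)*sqrt(h_sigma) = %e\n",
               sqrt(o_sigma) * sqrt(h_sigma));
        return 0;
    }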
[...]

> > +        for (y=0; y<h; y++) {
> > +            for (x=0; x<w; x++) {
> > +                data[x] = src[x];
> > +            }
>
> > +            data += in->linesize[p];
> > +            src += foc->cover_frame->linesize[p];
>
> I've seen gcc derping a lot on this; you might want to check if taking
> these dereferences out of the loop helps

I don't think that at this point of the loop it can have any overall
relevant effect.

>
> > +        }
> > +    }
> > +}
> > +static void blur(FOCContext *foc, AVFrame *in, int offx, int offy)
> > +{
> > +    int x, y, p;
> > +
> > +    for (p=0; p<3; p++) {
>
> > +        int ox = offx>>!!p;
> > +        int oy = offy>>!!p;
>
> FF_CEIL_RSHIFT?

No, that would be wrong here; the others I changed to use FF_CEIL_RSHIFT.
(See the small illustration below, after this quoted code.)

>
> > +        int stride = in->linesize[p];
> > +        uint8_t *data = in->data[p] + ox + oy * stride;
> > +        int w  = foc->obj_frame->width  >> !!p;
> > +        int h  = foc->obj_frame->height >> !!p;
> > +        int iw = in->width  >> !!p;
> > +        int ih = in->height >> !!p;
> > +        for (y=0; y<h; y++) {
> > +            for (x=0; x<w; x++) {
> > +                int c = 0;
> > +                int s = 0;
> > +                if (ox) {
> > +                    int scale = 65536 / (x + 1);
> > +                    s += data[-1 + y*stride] * scale;
> > +                    c += scale;
> > +                }
> > +                if (oy) {
> > +                    int scale = 65536 / (y + 1);
> > +                    s += data[x - stride] * scale;
> > +                    c += scale;
> > +                }
> > +                if (ox + w < iw) {
> > +                    int scale = 65536 / (w - x);
> > +                    s += data[w + y*stride] * scale;
> > +                    c += scale;
> > +                }
> > +                if (oy + h < ih) {
> > +                    int scale = 65536 / (h - y);
> > +                    s += data[x + h*stride] * scale;
> > +                    c += scale;
> > +                }
> > +                data[x + y*stride] = (s + (c>>1)) / c;
> > +            }
> > +        }
> > +    }
> > +}
> > +
> > +static int filter_frame(AVFilterLink *inlink, AVFrame *in)
> > +{
> > +    AVFilterContext *ctx = inlink->dst;
> > +    FOCContext *foc = ctx->priv;
> > +    float best_score;
> > +    int best_x, best_y;
> > +    int i;
> > +
> > +    foc->haystack_frame[0] = av_frame_clone(in);
> > +    for (i=1; i<foc->mipmaps; i++) {
> > +        foc->haystack_frame[i] = downscale(foc->haystack_frame[i-1]);
> > +    }
> > +
> > +    best_score = search(foc, 0, 0,
> > +                        FFMAX(foc->xmin, foc->last_x - 8),
> > +                        FFMIN(foc->xmax, foc->last_x + 8),
> > +                        FFMAX(foc->ymin, foc->last_y - 8),
> > +                        FFMIN(foc->ymax, foc->last_y + 8),
> > +                        &best_x, &best_y, 1.0);
> > +
> > +    best_score = search(foc, 0, foc->mipmaps - 1, foc->xmin, foc->xmax, foc->ymin, foc->ymax,
> > +                        &best_x, &best_y, best_score);
> > +
> > +    for (i=0; i<MAX_MIPMAPS; i++) {
> > +        av_frame_free(&foc->haystack_frame[i]);
> > +    }
> > +
> > +    if (best_score > foc->threshold) {
> > +        return ff_filter_frame(ctx->outputs[0], in);
> > +    }
> > +
>
> > +    av_log(ctx, AV_LOG_DEBUG, "Found at %d %d score %f\n", best_x, best_y, best_score);
>
> found what?

The filter is called "find and cover", so I think "Found" is clear.
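On the FF_CEIL_RSHIFT question above, a small standalone sketch of the
difference between a plain and a ceiling right shift for 4:2:0 subsampling;
the macro is a local stand-in rather than the libavutil definition, and the
comment about the offsets is my reading of the answer, not something stated
in the patch:

    #include <stdio.h>

    /* local stand-in for a ceiling right shift; like FFmpeg's own macro it
     * relies on arithmetic right shift of negative values */
    #define CEIL_RSHIFT(a, b) (-((-(a)) >> (b)))

    int main(void)
    {
        int v;

        /* For subsampled plane *sizes* a ceiling shift is the safe choice:
         * 7 luma pixels still map to 4 chroma samples, so the last column
         * or row is not dropped. */
        for (v = 6; v <= 9; v++)
            printf("%d: floor(v>>1)=%d  ceil=%d\n", v, v >> 1, CEIL_RSHIFT(v, 1));

        /* For a chroma *offset* like ox/oy above, rounding up would move the
         * start of the processed chroma area one sample past the start of the
         * corresponding luma area, so a plain >> is used there. */
        return 0;
    }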
> > +    foc->last_x = best_x;
> > +    foc->last_y = best_y;
> > +
> > +    av_frame_make_writable(in);
> > +
> > +    if (foc->mode == MODE_BLUR) {
> > +        blur (foc, in, best_x, best_y);
> > +    } else {
> > +        cover(foc, in, best_x, best_y);
> > +    }
> > +    return ff_filter_frame(ctx->outputs[0], in);
> > +}
> > +
> > +static av_cold void uninit(AVFilterContext *ctx)
> > +{
> > +    FOCContext *foc = ctx->priv;
> > +    int i;
> > +
> > +    for (i=0; i<MAX_MIPMAPS; i++) {
>
> nit: pleasefixthestyle

Google says "mipmap" has 287,000 results, "mip map" has 57,500 results.

>
> > +        av_frame_free(&foc->needle_frame[i]);
> > +        av_frame_free(&foc->haystack_frame[i]);
> > +    }
> > +
> > +    if (foc->obj_frame)
> > +        av_freep(&foc->obj_frame->data[0]);
> > +    if (foc->cover_frame)
> > +        av_freep(&foc->cover_frame->data[0]);
> > +    av_frame_free(&foc->obj_frame);
> > +}
> > +
> > +static av_cold int init(AVFilterContext *ctx)
> > +{
> > +    FOCContext *foc = ctx->priv;
> > +    int ret, i;
> > +
> > +    if (!foc->obj_filename || (!foc->cover_filename && foc->mode == MODE_COVER)) {
> > +        av_log(ctx, AV_LOG_ERROR, "object or cover filename not set\n");
> > +        return AVERROR(EINVAL);
> > +    }
> > +
> > +    foc->obj_frame = av_frame_alloc();
> > +    if (!foc->obj_frame)
> > +        return AVERROR(ENOMEM);
> > +
>
> > +    if ((ret = ff_load_image(foc->obj_frame->data, foc->obj_frame->linesize,
> > +                             &foc->obj_frame->width, &foc->obj_frame->height,
> > +                             &foc->obj_frame->format, foc->obj_filename, ctx)) < 0)
> > +        return ret;
>
> Can you use the dualinput mechanism to load the image instead? See
> paletteuse for a typical 1-frame load case. This will avoid a libavformat
> and libavcodec dependency for the filter (which you forgot to add to
> configure), as well as the clumsiness of passing the file path into the
> filtergraph.

I am a bit too lazy to implement that; also, there are 3 inputs (the input
video, the object to find, and the replacement object to paint over; the
last one is optional).

I'll post a patch with all the other issues fixed in a moment. If you want,
you can change this one remaining issue and push it.

Thanks

[...]

--
Michael     GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Republics decline into democracies and democracies degenerate into
despotisms. -- Aristotle
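Regarding the av_freep(&foc->obj_frame->data[0]) calls in the uninit() hunk
quoted above, a minimal sketch of the ownership pattern they imply, assuming
(as lavfutils suggests) that ff_load_image() fills the planes with a flat
av_image_alloc() buffer rather than a refcounted one; the helper name is made
up for the example:

    #include <libavutil/frame.h>
    #include <libavutil/mem.h>

    /* Frames whose planes were filled via av_image_alloc() are not
     * reference counted, so av_frame_free() on its own would only release
     * the AVFrame struct and leak the pixel buffer; data[0] has to be
     * freed explicitly first. */
    static void free_image_frame(AVFrame **frame)
    {
        if (!*frame)
            return;
        av_freep(&(*frame)->data[0]); /* the av_image_alloc() buffer */
        av_frame_free(frame);         /* the AVFrame itself */
    }

The needle/haystack frames above, by contrast, appear to be ordinary
refcounted frames, which is presumably why av_frame_free() alone is enough
for them.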