author    Thilo Borgmann <thilo.borgmann@mail.de>    2022-07-30 13:10:45 +0200
committer Thilo Borgmann <thilo.borgmann@mail.de>    2022-07-30 13:17:28 +0200
commit    9d66417cc5bd705dca15e90aea3fa59d07422705 (patch)
tree      440fa109716711117b2dac843a2d90c5a76c3087 /libavfilter
parent    cf1f57443158bcbe84a213e8dc631a302993f9a2 (diff)
lavfi/cropdetect: Add new mode to detect crop-area based on motion vectors and edges
This mode allows crop detection even when the video is embedded in non-black areas.
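Usage sketch (an assumption for illustration, not part of this patch): the new mvedges mode reads the AV_FRAME_DATA_MOTION_VECTORS side data, so the decoder has to export motion vectors, e.g. ffmpeg -flags2 +export_mvs -i input.mkv -vf cropdetect=mode=mvedges -f null -. Frames without exported vectors trigger the "Cannot detect: no motion vectors available" warning and are reported as full-frame.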
Diffstat (limited to 'libavfilter')
-rw-r--r--  libavfilter/version.h       |   2
-rw-r--r--  libavfilter/vf_cropdetect.c | 211
2 files changed, 211 insertions, 2 deletions
diff --git a/libavfilter/version.h b/libavfilter/version.h
index 0946ee91e8..19a009c110 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -32,7 +32,7 @@
#include "version_major.h"
#define LIBAVFILTER_VERSION_MINOR 46
-#define LIBAVFILTER_VERSION_MICRO 100
+#define LIBAVFILTER_VERSION_MICRO 101
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
diff --git a/libavfilter/vf_cropdetect.c b/libavfilter/vf_cropdetect.c
index b887b9ecb1..e920e671ab 100644
--- a/libavfilter/vf_cropdetect.c
+++ b/libavfilter/vf_cropdetect.c
@@ -26,11 +26,14 @@
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
+#include "libavutil/motion_vector.h"
+#include "libavutil/qsort.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
+#include "edge_common.h"
typedef struct CropDetectContext {
const AVClass *class;
@@ -42,6 +45,16 @@ typedef struct CropDetectContext {
int frame_nb;
int max_pixsteps[4];
int max_outliers;
+ int mode;
+ int window_size;
+ int mv_threshold;
+ float low, high;
+ uint8_t low_u8, high_u8;
+ uint8_t *filterbuf;
+ uint8_t *tmpbuf;
+ uint16_t *gradients;
+ char *directions;
+ int *bboxes[4];
} CropDetectContext;
static const enum AVPixelFormat pix_fmts[] = {
@@ -61,6 +74,17 @@ static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_NONE
};
+enum CropMode {
+ MODE_BLACK,
+ MODE_MV_EDGES,
+ MODE_NB
+};
+
+static int comp(const int *a,const int *b)
+{
+ return FFDIFFSIGN(*a, *b);
+}
+
static int checkline(void *ctx, const unsigned char *src, int stride, int len, int bpp)
{
int total = 0;
@@ -116,11 +140,43 @@ static int checkline(void *ctx, const unsigned char *src, int stride, int len, i
return total;
}
+static int checkline_edge(void *ctx, const unsigned char *src, int stride, int len, int bpp)
+{
+ const uint16_t *src16 = (const uint16_t *)src;
+
+ switch (bpp) {
+ case 1:
+ while (--len >= 0) {
+ if (src[0]) return 0;
+ src += stride;
+ }
+ break;
+ case 2:
+ stride >>= 1;
+ while (--len >= 0) {
+ if (src16[0]) return 0;
+ src16 += stride;
+ }
+ break;
+ case 3:
+ case 4:
+ while (--len >= 0) {
+ if (src[0] || src[1] || src[2]) return 0;
+ src += stride;
+ }
+ break;
+ }
+
+ return 1;
+}
+
static av_cold int init(AVFilterContext *ctx)
{
CropDetectContext *s = ctx->priv;
s->frame_nb = -1 * s->skip;
+ s->low_u8 = s->low * 255. + .5;
+ s->high_u8 = s->high * 255. + .5;
av_log(ctx, AV_LOG_VERBOSE, "limit:%f round:%d skip:%d reset_count:%d\n",
s->limit, s->round, s->skip, s->reset_count);
@@ -128,11 +184,27 @@ static av_cold int init(AVFilterContext *ctx)
return 0;
}
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ CropDetectContext *s = ctx->priv;
+
+ av_freep(&s->tmpbuf);
+ av_freep(&s->filterbuf);
+ av_freep(&s->gradients);
+ av_freep(&s->directions);
+ av_freep(&s->bboxes[0]);
+ av_freep(&s->bboxes[1]);
+ av_freep(&s->bboxes[2]);
+ av_freep(&s->bboxes[3]);
+}
+
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ const int bufsize = inlink->w * inlink->h;
+ int bpp;
av_image_fill_max_pixsteps(s->max_pixsteps, NULL, desc);
@@ -144,6 +216,21 @@ static int config_input(AVFilterLink *inlink)
s->x2 = 0;
s->y2 = 0;
+ bpp = s->max_pixsteps[0];
+ s->window_size = FFMAX(s->reset_count, 15);
+ s->tmpbuf = av_malloc(bufsize);
+ s->filterbuf = av_malloc(bufsize * s->max_pixsteps[0]);
+ s->gradients = av_calloc(bufsize, sizeof(*s->gradients));
+ s->directions = av_malloc(bufsize);
+ s->bboxes[0] = av_malloc(s->window_size * sizeof(*s->bboxes[0]));
+ s->bboxes[1] = av_malloc(s->window_size * sizeof(*s->bboxes[1]));
+ s->bboxes[2] = av_malloc(s->window_size * sizeof(*s->bboxes[2]));
+ s->bboxes[3] = av_malloc(s->window_size * sizeof(*s->bboxes[3]));
+
+ if (!s->tmpbuf || !s->filterbuf || !s->gradients || !s->directions ||
+ !s->bboxes[0] || !s->bboxes[1] || !s->bboxes[2] || !s->bboxes[3])
+ return AVERROR(ENOMEM);
+
return 0;
}
@@ -155,11 +242,28 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
AVFilterContext *ctx = inlink->dst;
CropDetectContext *s = ctx->priv;
int bpp = s->max_pixsteps[0];
- int w, h, x, y, shrink_by;
+ int w, h, x, y, shrink_by, i;
AVDictionary **metadata;
int outliers, last_y;
int limit = lrint(s->limit);
+ const int inw = inlink->w;
+ const int inh = inlink->h;
+ uint8_t *tmpbuf = s->tmpbuf;
+ uint8_t *filterbuf = s->filterbuf;
+ uint16_t *gradients = s->gradients;
+ int8_t *directions = s->directions;
+ const AVFrameSideData *sd = NULL;
+ int scan_w, scan_h, bboff;
+
+ void (*sobel)(int w, int h, uint16_t *dst, int dst_linesize,
+ int8_t *dir, int dir_linesize,
+ const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_sobel_16 : &ff_sobel_8;
+ void (*gaussian_blur)(int w, int h,
+ uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize, int src_stride) = (bpp == 2) ? &ff_gaussian_blur_16 : &ff_gaussian_blur_8;
+
+
// ignore first s->skip frames
if (++s->frame_nb > 0) {
metadata = &frame->metadata;
@@ -185,11 +289,109 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
last_y = y INC;\
}
+ if (s->mode == MODE_BLACK) {
FIND(s->y1, 0, y < s->y1, +1, frame->linesize[0], bpp, frame->width);
FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame->width);
FIND(s->x1, 0, y < s->x1, +1, bpp, frame->linesize[0], frame->height);
FIND(s->x2, frame->width - 1, y > FFMAX(s->x2, s->x1), -1, bpp, frame->linesize[0], frame->height);
+ } else { // MODE_MV_EDGES
+ sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
+ s->x1 = 0;
+ s->y1 = 0;
+ s->x2 = inw - 1;
+ s->y2 = inh - 1;
+
+ if (!sd) {
+ av_log(ctx, AV_LOG_WARNING, "Cannot detect: no motion vectors available\n");
+ } else {
+ // gaussian filter to reduce noise
+ gaussian_blur(inw, inh,
+ filterbuf, inw*bpp,
+ frame->data[0], frame->linesize[0], bpp);
+
+ // compute the 16-bits gradients and directions for the next step
+ sobel(inw, inh, gradients, inw, directions, inw, filterbuf, inw*bpp, bpp);
+
+ // non_maximum_suppression() will actually keep & clip what's necessary and
+ // ignore the rest, so we need a clean output buffer
+ memset(tmpbuf, 0, inw * inh);
+ ff_non_maximum_suppression(inw, inh, tmpbuf, inw, directions, inw, gradients, inw);
+
+
+ // keep high values, or low values surrounded by high values
+ ff_double_threshold(s->low_u8, s->high_u8, inw, inh,
+ tmpbuf, inw, tmpbuf, inw);
+
+ // scan all MVs and store bounding box
+ s->x1 = inw - 1;
+ s->y1 = inh - 1;
+ s->x2 = 0;
+ s->y2 = 0;
+ for (i = 0; i < sd->size / sizeof(AVMotionVector); i++) {
+ const AVMotionVector *mv = (const AVMotionVector*)sd->data + i;
+ const int mx = mv->dst_x - mv->src_x;
+ const int my = mv->dst_y - mv->src_y;
+
+ if (mv->dst_x >= 0 && mv->dst_x < inw &&
+ mv->dst_y >= 0 && mv->dst_y < inh &&
+ mv->src_x >= 0 && mv->src_x < inw &&
+ mv->src_y >= 0 && mv->src_y < inh &&
+ mx * mx + my * my >= s->mv_threshold * s->mv_threshold) {
+ s->x1 = mv->dst_x < s->x1 ? mv->dst_x : s->x1;
+ s->y1 = mv->dst_y < s->y1 ? mv->dst_y : s->y1;
+ s->x2 = mv->dst_x > s->x2 ? mv->dst_x : s->x2;
+ s->y2 = mv->dst_y > s->y2 ? mv->dst_y : s->y2;
+ }
+ }
+
+ // assert x1<x2, y1<y2
+ if (s->x1 > s->x2) FFSWAP(int, s->x1, s->x2);
+ if (s->y1 > s->y2) FFSWAP(int, s->y1, s->y2);
+
+ // scan outward looking for 0-edge-lines in edge image
+ scan_w = s->x2 - s->x1;
+ scan_h = s->y2 - s->y1;
+
+#define FIND_EDGE(DST, FROM, NOEND, INC, STEP0, STEP1, LEN) \
+ for (last_y = y = FROM; NOEND; y = y INC) { \
+ if (checkline_edge(ctx, tmpbuf + STEP0 * y, STEP1, LEN, bpp)) { \
+ if (last_y INC == y) { \
+ DST = y; \
+ break; \
+ } else \
+ last_y = y; \
+ } \
+ } \
+ if (!(NOEND)) { \
+ DST = y -(INC); \
+ }
+ FIND_EDGE(s->y1, s->y1, y >= 0, -1, inw, bpp, scan_w);
+ FIND_EDGE(s->y2, s->y2, y < inh, +1, inw, bpp, scan_w);
+ FIND_EDGE(s->x1, s->x1, y >= 0, -1, bpp, inw, scan_h);
+ FIND_EDGE(s->x2, s->x2, y < inw, +1, bpp, inw, scan_h);
+
+ // queue bboxes
+ bboff = (s->frame_nb - 1) % s->window_size;
+ s->bboxes[0][bboff] = s->x1;
+ s->bboxes[1][bboff] = s->x2;
+ s->bboxes[2][bboff] = s->y1;
+ s->bboxes[3][bboff] = s->y2;
+
+ // sort queue
+ bboff = FFMIN(s->frame_nb, s->window_size);
+ AV_QSORT(s->bboxes[0], bboff, int, comp);
+ AV_QSORT(s->bboxes[1], bboff, int, comp);
+ AV_QSORT(s->bboxes[2], bboff, int, comp);
+ AV_QSORT(s->bboxes[3], bboff, int, comp);
+
+ // return median of window_size elems
+ s->x1 = s->bboxes[0][bboff/2];
+ s->x2 = s->bboxes[1][bboff/2];
+ s->y1 = s->bboxes[2][bboff/2];
+ s->y2 = s->bboxes[3][bboff/2];
+ }
+ }
// round x and y (up), important for yuv colorspaces
// make sure they stay rounded!
@@ -243,6 +445,12 @@ static const AVOption cropdetect_options[] = {
{ "skip", "Number of initial frames to skip", OFFSET(skip), AV_OPT_TYPE_INT, { .i64 = 2 }, 0, INT_MAX, FLAGS },
{ "reset_count", "Recalculate the crop area after this many frames",OFFSET(reset_count),AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "max_outliers", "Threshold count of outliers", OFFSET(max_outliers),AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_BLACK}, 0, MODE_NB-1, FLAGS, "mode" },
+ { "black", "detect black pixels surrounding the video", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BLACK}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "mvedges", "detect motion and edged surrounding the video", 0, AV_OPT_TYPE_CONST, {.i64=MODE_MV_EDGES}, INT_MIN, INT_MAX, FLAGS, "mode" },
+ { "high", "Set high threshold for edge detection", OFFSET(high), AV_OPT_TYPE_FLOAT, {.dbl=25/255.}, 0, 1, FLAGS },
+ { "low", "Set low threshold for edge detection", OFFSET(low), AV_OPT_TYPE_FLOAT, {.dbl=15/255.}, 0, 1, FLAGS },
+ { "mv_threshold", "motion vector threshold when estimating video window size", OFFSET(mv_threshold), AV_OPT_TYPE_INT, {.i64=8}, 0, 100, FLAGS},
{ NULL }
};
@@ -270,6 +478,7 @@ const AVFilter ff_vf_cropdetect = {
.priv_size = sizeof(CropDetectContext),
.priv_class = &cropdetect_class,
.init = init,
+ .uninit = uninit,
FILTER_INPUTS(avfilter_vf_cropdetect_inputs),
FILTER_OUTPUTS(avfilter_vf_cropdetect_outputs),
FILTER_PIXFMTS_ARRAY(pix_fmts),
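
For illustration (a standalone sketch, not code from this patch): the MODE_MV_EDGES branch above smooths the detected rectangle by queueing the last window_size values of each bounding-box coordinate, sorting them, and reporting the median, so a single noisy frame cannot shift the crop area. The same idea in plain C, using qsort() in place of AV_QSORT and sorting a scratch copy instead of the queue itself:

    #include <stdio.h>
    #include <stdlib.h>

    /* Order two ints for qsort(); mirrors the patch's comp()/FFDIFFSIGN. */
    static int cmp_int(const void *a, const void *b)
    {
        const int x = *(const int *)a, y = *(const int *)b;
        return (x > y) - (x < y);
    }

    /* Store the newest value in a ring of window_size slots and return the
     * median of the values collected so far. */
    static int median_update(int *ring, int *scratch, int window_size,
                             int frame_nb, int value)
    {
        int filled = frame_nb + 1 < window_size ? frame_nb + 1 : window_size;
        int i;

        ring[frame_nb % window_size] = value;
        for (i = 0; i < filled; i++)      /* sort a copy, keep the ring intact */
            scratch[i] = ring[i];
        qsort(scratch, filled, sizeof(*scratch), cmp_int);
        return scratch[filled / 2];
    }

    int main(void)
    {
        enum { WINDOW = 15 };             /* FFMAX(reset_count, 15) in the patch */
        int ring[WINDOW] = {0}, scratch[WINDOW];
        const int x1_raw[] = { 12, 8, 640, 10, 11, 9, 12 };  /* frame 2 is an outlier */

        for (int i = 0; i < 7; i++)
            printf("frame %d: raw x1=%3d  median x1=%d\n",
                   i, x1_raw[i], median_update(ring, scratch, WINDOW, i, x1_raw[i]));
        return 0;
    }

Since config_input() sets window_size to FFMAX(reset_count, 15), the reported rectangle is always the median over a window of at least 15 frames.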