vf_redact.c

andrew, 08/01/2012 08:56 pm

/*
 * Copyright (c) 2011 Andrew Senior
 *
 * This file is for use with ffmpeg
 *
 */

/**
 * @file
 * Redaction filter. Read a file describing redactions. Wipe boxes in frames
 * accordingly.
 */

/* The redactions filename is passed as the sole parameter to the filter.
 * The file consists of lines describing "boxtracks", each defined
 * by 7 comma-separated values: "%lf,%lf,%d,%d,%d,%d,%s"
 * e.g. "0.5,1.5,50,100,0,1000,green"
 * First come the start and end times (floating point, in seconds, referenced
 * to the presentation time stamp given by ffmpeg), then the left, right, top
 * and bottom spatial coordinates of the redaction region. (The origin is the
 * top left; coordinates increase down and to the right.)
 * Finally a redaction method string is given, which is either "pixel" for
 * pixellation, "inverse" for inverse pixellation (not yet implemented),
 * "blur" for face blurring,
 * or an ffmpeg color specifier for solid redaction.
 * The file can contain comments, i.e. lines beginning with "#".
 */
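
/* For illustration, a small hypothetical track file in the format described
 * above. The optional "seed" line sets the random seed parsed in init();
 * the other lines follow the start,end,left,right,top,bottom,method layout:
 *
 *   # redact two regions with solid colour and pixellation, blur a third
 *   seed 12345
 *   0.5,1.5,50,100,0,1000,green
 *   2.0,4.25,120,300,80,240,pixel
 *   5.0,9.0,200,400,100,350,blur
 */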

/* Use:
 * put this file in the libavfilter directory
 * add the line
       REGISTER_FILTER (REDACT,      redact,      vf);
 * to avfilter_register_all in libavfilter/allfilters.c,
 * add
       OBJS-$(CONFIG_REDACT_FILTER)                 += vf_redact.o
 * to libavfilter/Makefile, and
       CONFIG_REDACT_FILTER=yes
 * to config.mak
 */
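
/* A hypothetical invocation (untested; the exact option syntax depends on
 * the ffmpeg version): because the filter has two output pads (the redacted
 * video and the redaction-reversal data), it has to be used through
 * -filter_complex with both pads mapped, along the lines of
 *
 *   ffmpeg -i in.avi \
 *     -filter_complex "redact=tracks.txt [vis][inv]" \
 *     -map "[vis]" redacted.avi -map "[inv]" reversal.avi
 */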

/* todo:
 * add noise to pixellation to make super-resolution attacks harder.
 * a more stable-across-time pixellation appearance
 * inverse pixellation
 * allow the megapixel size to be specified.
 * alternative ways to import the specification
 *    (e.g. another input port, or directly in the config string)
 * doing face detection & tracking on the fly in the filter
 * preserving redacted information in a separate stream
 * allow motion specification for tracks (velocity, spline...)
 * reference megapixels to the top left, rather than absolute grid?
 */

#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
#include "libavutil/lfg.h"
#include "libavutil/pixdesc.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include <math.h>     // for NAN and sqrt()
#include <strings.h>

enum { Y, U, V, A };
static int logging = 0;
typedef enum {redact_solid,
              redact_pixellate,
              redact_inverse_pixellate,
              redact_blur}
  redaction_method;
typedef struct {
  int l, r, t, b;
  double start, end;
  redaction_method method;
  unsigned char yuv_color[4];  // Used when method is redact_solid
} BoxTrack;

typedef struct {
  int vsub, hsub;   ///< chroma subsampling
  int numtracks;
  double time_seconds;
  BoxTrack **boxtracks;
  AVLFG random;
  AVFilterBufferRef *lastredacted;    ///< Previous frame
} RedactionContext;

static void log_box_track(BoxTrack *bt,
                          AVFilterContext *ctx) {
  av_log(ctx, AV_LOG_INFO, "Box track: %d: (%.1f-%.1fs) %d-%d x %d-%d\n",
         bt->method, bt->start, bt->end,
         bt->l, bt->r, bt->t, bt->b);
}

// Memory status stuff from
// stackoverflow.com/questions/1558402/memory-usage-of-current-process-in-c
typedef struct {
    unsigned long size, resident, share, text, lib, data, dt;
} statm_t;

static void read_off_memory_status(statm_t *result)
{
  const char *statm_path = "/proc/self/statm";

  // The statm fields are unsigned long, so read them with %lu.
  FILE *f = fopen(statm_path, "r");
  if (!f) {
    abort();
  }
  if (7 != fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
                  &result->size,
                  &result->resident,
                  &result->share,
                  &result->text,
                  &result->lib,
                  &result->data,
                  &result->dt))
  {
    abort();
  }
  fclose(f);
}

static BoxTrack *box_track_from_string(const char *track_def,
                                       AVFilterContext *ctx) {
  BoxTrack *boxtrack = NULL;
  int rv = 0;
  int l, r, t, b;
#define BUFLEN 1000
  char method[BUFLEN];
  double start, end;

  // Allow comments, empty lines.
  if (track_def[0] == '#' || track_def[0] == '\0')
    return NULL;

  rv = sscanf(track_def, "%lf,%lf,%d,%d,%d,%d,%s", &start, &end,
              &l, &r, &t, &b, method);
  if (rv != 7) {
    av_log(ctx, AV_LOG_ERROR, "Failed to parse boxtrack '%s'.\n", track_def);
    return NULL;
  }
  boxtrack = (BoxTrack *)av_malloc(sizeof(BoxTrack));
  boxtrack->l = l;
  boxtrack->r = r;
  boxtrack->t = t;
  boxtrack->b = b;
  boxtrack->start = start;
  boxtrack->end = end;
  boxtrack->method = redact_pixellate;

  // todo: allow the granularity of the pixellation to be specified.
  if (av_strncasecmp(method, "pixel", 5) == 0)
    boxtrack->method = redact_pixellate;
  else if (av_strncasecmp(method, "inv", 3) == 0)
    boxtrack->method = redact_inverse_pixellate;
  else if (av_strncasecmp(method, "blur", 4) == 0)
    boxtrack->method = redact_blur;
  else {
    uint8_t rgba_color[4];

    boxtrack->method = redact_solid;

    if (av_parse_color(rgba_color, method, -1, ctx) < 0)
      av_log(ctx, AV_LOG_ERROR, "Couldn't parse colour '%s'.\n", method);

    boxtrack->yuv_color[Y] =
      RGB_TO_Y_CCIR(rgba_color[0], rgba_color[1], rgba_color[2]);
    boxtrack->yuv_color[U] =
      RGB_TO_U_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
    boxtrack->yuv_color[V] =
      RGB_TO_V_CCIR(rgba_color[0], rgba_color[1], rgba_color[2], 0);
    boxtrack->yuv_color[A] = rgba_color[3];
  }
  return boxtrack;
}

static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
    RedactionContext *redaction = ctx->priv;
    FILE *file = NULL;
    char buf[BUFLEN];
    unsigned int seed = 298379;
    redaction->boxtracks = NULL;
    redaction->lastredacted = NULL;
    redaction->time_seconds = NAN;
    if (!args) {
      av_log(ctx, AV_LOG_ERROR, "No arguments given to redact.\n");
      return AVERROR(EINVAL);
    }
    file = fopen(args, "r");
    if (!file) {
      av_log(ctx, AV_LOG_ERROR, "Can't open redaction file: '%s'\n", args);
      return AVERROR(ENOENT);
    }
    redaction->numtracks = 0;
    // Parse the config file.
    while (!feof(file)) {
      BoxTrack **boxtracks = NULL;
      BoxTrack *new_track = NULL;

      if (fgets(buf, BUFLEN, file) == NULL) break; // EOF
      if (strncmp(buf, "seed", 4) == 0) {
        int rv = sscanf(buf, "seed %u", &seed);
        if (rv != 1)
          av_log(ctx, AV_LOG_ERROR, "Didn't parse seed: %s.\n", buf);

        continue;
      }
      new_track = box_track_from_string(buf, ctx);
      if (new_track == NULL)
        continue;
      log_box_track(new_track, ctx);
      // Resize the array and add the new track.
      boxtracks = (BoxTrack **)av_malloc((redaction->numtracks + 1) *
                                         sizeof(BoxTrack *));
      for (int i = 0; i < redaction->numtracks; ++i)
        boxtracks[i] = redaction->boxtracks[i];
      boxtracks[redaction->numtracks++] = new_track;
      av_free(redaction->boxtracks);
      redaction->boxtracks = boxtracks;
    }
    av_log(ctx, AV_LOG_INFO, "Seed is: '%u'\n", seed);
    av_lfg_init(&redaction->random, seed);
    fclose(file);
    // Sort the tracks so the earliest-starting are at the end of the array.
    for (int j = 0; j < redaction->numtracks - 1; ++j)
      for (int k = j + 1; k < redaction->numtracks; ++k)
        if (redaction->boxtracks[j]->start < redaction->boxtracks[k]->start) {
          BoxTrack *temp = redaction->boxtracks[j];
          redaction->boxtracks[j] = redaction->boxtracks[k];
          redaction->boxtracks[k] = temp;
        }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = {
        PIX_FMT_YUV444P,  PIX_FMT_YUV422P,  PIX_FMT_YUV420P,
        PIX_FMT_YUV411P,  PIX_FMT_YUV410P,
        PIX_FMT_YUVJ444P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ420P,
        PIX_FMT_YUV440P,  PIX_FMT_YUVJ440P,
        PIX_FMT_NONE
    };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    RedactionContext *redaction = inlink->dst->priv;

    redaction->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
    redaction->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h;

    av_log(inlink->dst, AV_LOG_INFO, "Redaction with %d tracks %d %d\n",
           redaction->numtracks, redaction->hsub, redaction->vsub);

    return 0;
}

// Decode the timestamp and set up a writable output frame on each output.
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
{
  AVFilterContext *ctx = inlink->dst;
  RedactionContext *redaction = inlink->dst->priv;
  AVFilterLink *outlink0 = inlink->dst->outputs[0];
  AVFilterLink *outlink1 = inlink->dst->outputs[1];
  AVFilterBufferRef *outpicref = NULL;

  if (logging)
    av_log(ctx, AV_LOG_INFO, "startstartframe\n");
  redaction->time_seconds = picref->pts * av_q2d(inlink->time_base);
  outpicref = avfilter_get_video_buffer(outlink0, AV_PERM_WRITE,
                                        outlink0->w, outlink0->h);
  avfilter_copy_buffer_ref_props(outpicref, picref);
  outpicref->video->w = outlink0->w;
  outpicref->video->h = outlink0->h;
  outlink0->out_buf = outpicref;
  outlink0->out_buf->pts = picref->pts;
  outlink0->out_buf->pos = picref->pos;

  avfilter_start_frame(outlink0,
                       avfilter_ref_buffer(outlink0->out_buf, ~0));

  outpicref = avfilter_get_video_buffer(outlink1, AV_PERM_WRITE,
                                        outlink1->w, outlink1->h);
  avfilter_copy_buffer_ref_props(outpicref, picref);
  outpicref->video->w = outlink1->w;
  outpicref->video->h = outlink1->h;
  outlink1->out_buf = outpicref;
  outlink1->out_buf->pts = picref->pts;
  outlink1->out_buf->pos = picref->pos;
  avfilter_start_frame(outlink1, outlink1->out_buf);
  //                       avfilter_ref_buffer(outlink1->out_buf,  ~0));
  if (logging)
    av_log(ctx, AV_LOG_INFO, "endstartframe\n");
}

static int noise = 10;
static void convolve_ny(int t, int b, int maxy,
                        unsigned char *row, int blur, unsigned char *blurbuf,
                        int step,
                        AVLFG *random) {
  int halfblur = blur/2;
  int blursum = 0;
  int i;
  for (i = 0; i < blur; ++i) {
    int pos = t + i - halfblur;
    blurbuf[i] = (pos <= 0) ? row[0] : row[pos * step];
    blursum += blurbuf[i];
  }
  for (int y = t; y < b; ++y, ++i) {
    int newval = blursum / blur;
    int newpos = y + (blur + 1)/2;
    i %= blur;
    if (noise > 0) {
      newval += (av_lfg_get(random) % (2 * noise + 1)) - noise;
      if (newval < 0) newval = 0;
      else if (newval > 255) newval = 255;
    }
    row[y * step] = newval;
    blursum -= blurbuf[i];
    blurbuf[i] = row[step * ((newpos < maxy) ? newpos : (maxy - 1))];
    blursum += blurbuf[i];
  }
}
// Keep a rolling buffer of n values and their sum.
// Store the average in the output vector and update the sum by dropping one
// value and inserting another.
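// Illustrative worked example for convolve_nx below (ignoring the random
// noise term): with blur = 2, l = 1, r = 3, maxx = 4 and row = {10, 20, 30, 40},
// the loop writes row[1] = (10 + 20) / 2 = 15 and row[2] = (20 + 30) / 2 = 25,
// leaving row[0] and row[3] untouched.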
static void convolve_nx(int l, int r, int maxx,
                        unsigned char *row, int blur, unsigned char *blurbuf,
                        AVLFG *random) {
  int halfblur = blur/2;
  int blursum = 0;
  int i;
  for (i = 0; i < blur; ++i) {
    int pos = l + i - halfblur;
    blurbuf[i] = (pos <= 0) ? row[0] : row[pos];
    blursum += blurbuf[i];
  }
  for (int x = l; x < r; ++x, ++i) {
    int newval = blursum / blur;
    int newpos = x + (blur + 1)/2;
    i %= blur;
    if (noise > 0) {
      newval += (av_lfg_get(random) % (2 * noise + 1)) - noise;
      if (newval < 0) newval = 0;
      else if (newval > 255) newval = 255;
    }
    row[x] = newval;
    blursum -= blurbuf[i];
    blurbuf[i] = row[(newpos < maxx) ? newpos : (maxx - 1)];
    blursum += blurbuf[i];
  }
}

static void blur_one_round(AVFilterBufferRef *picref, BoxTrack *boxtrack,
                           int y0, int h, int hsub, int vsub,
                           int blur, unsigned char *blurbuf,
                           AVLFG *random) {
  int xb = boxtrack->l, yb = boxtrack->t;
  int hb = boxtrack->b - boxtrack->t;
  int wb = boxtrack->r - boxtrack->l;
  int x, y;
  int xmax, ymax;

#define BLURX
#define BLURY
#ifdef BLURX
  x = FFMAX(xb, 0);
  blur = wb/2;
  xmax = FFMIN((xb + wb), picref->video->w);
  for (y = FFMAX(yb, y0); y < (y0 + h) && y < (yb + hb); ++y) {
    for (int plane = 0; plane < 3; plane++) {
      int ds = (plane == 0) ? 0 : hsub;
      unsigned char *row = picref->data[plane] +
        picref->linesize[plane] * (y >> ((plane == 0) ? 0 : vsub));
      convolve_nx(x >> ds, (xmax + ((1 << ds) - 1)) >> ds,
                  picref->video->w >> ds,
                  row,
                  (blur + ((1 << ds) - 1)) >> ds, blurbuf, random);
    }
  }
#endif
#ifdef BLURY
  y = FFMAX(yb, y0);
  ymax = FFMIN((yb + hb), (y0 + h));
  blur = hb/2;
  for (x = FFMAX(xb, 0); x < (xb + wb) && x < picref->video->w; x++) {
    for (int plane = 0; plane < 3; plane++) {
      int ds = (plane == 0) ? 0 : vsub;
      unsigned char *col = picref->data[plane] + (x >> ((plane == 0) ? 0 : hsub));
      convolve_ny(y >> ds, (ymax + ((1 << ds) - 1)) >> ds,
                  (y0 + h) >> ds,
                  col, (blur + ((1 << ds) - 1)) >> ds, blurbuf,
                  picref->linesize[plane], random);
    }
  }
#endif
}

static void copybox_mixold_alpha(AVFilterBufferRef *source,
                                 AVFilterBufferRef *picref,
                                 AVFilterBufferRef *lastref,
                                 BoxTrack *boxtrack,
                                 int hsub, int vsub,
                                 AVLFG *random) {
  int xb = boxtrack->l;
  int yb = boxtrack->t;
  int hb = boxtrack->b - boxtrack->t;
  int wb = boxtrack->r - boxtrack->l;

  float blur_boundary = 0.2;
  for (int y = FFMAX(yb, 0); y < picref->video->h && y < boxtrack->b; y++) {
    float ynormsq = (y * 2.0 - (boxtrack->b + boxtrack->t)) / hb;
    ynormsq *= ynormsq;
    for (int plane = 0; plane < 3; plane++) {
      int ysub = (y >> ((plane == 0) ? 0 : vsub));
      unsigned char *row = picref->data[plane] +
        picref->linesize[plane] * ysub;
      unsigned char *srcrow = source->data[plane] +
        picref->linesize[plane] * ysub;
      unsigned char *lastrow = lastref->data[plane] +
        picref->linesize[plane] * ysub;

      int thishsub = (plane == 0) ? 0 : hsub;
      int xmin = FFMAX(xb, 0) >> thishsub;
      int xmax = FFMIN(boxtrack->r, picref->video->w)
        >> thishsub;
      for (int x = xmin; x < xmax; x++) {
        // TODO: do the alphablending in int.
        // TODO: allow a flag for alpha blending or not.
        float xnorm = ((x << thishsub) * 2.0 - (boxtrack->l + boxtrack->r))
          / wb;
        float mixlast = ((av_lfg_get(random) % 20) + 10) / 40.0;
        float alphax = (1 - sqrt(xnorm * xnorm + ynormsq));
        if (alphax < 0) {
          row[x] = srcrow[x];
          continue;
        }
        if (alphax > blur_boundary)
          alphax = 1;
        else
          alphax /= blur_boundary;
        row[x] = (1 - alphax) * srcrow[x] +
          alphax * ((1 - mixlast) * row[x] + mixlast * lastrow[x]);
      }
    }
  }
}

// In a picture carry out the obscuration of boxtrack.
static void obscure_one_box(AVFilterBufferRef *source,
                            AVFilterBufferRef *picref,
                            AVFilterBufferRef *lastref,
                            BoxTrack *boxtrack,
                            int y0, int h, int hsub, int vsub,
                            AVLFG *random) {
  unsigned char *row[4];
  int xb = boxtrack->l, yb = boxtrack->t;
  int hb = boxtrack->b - boxtrack->t;
  int wb = boxtrack->r - boxtrack->l;
  int megapixel_size = 64;  // todo: get from file
  int x, y;
  if (boxtrack->method == redact_blur) {
    const int blur = FFMAX(hb, wb);
    unsigned char *blurbuf = (unsigned char *)av_malloc(blur);
    blur_one_round(picref, boxtrack, y0, h, hsub, vsub, blur, blurbuf, random);
    copybox_mixold_alpha(source, picref,
                         ((lastref == NULL) ? source : lastref),
                         boxtrack, hsub, vsub, random);
    av_free(blurbuf);
    return;
  }

  for (y = FFMAX(yb, y0); y < (y0 + h) && y < (yb + hb); y++) {
    row[0] = picref->data[0] + y * picref->linesize[0];

    for (int plane = 1; plane < 3; plane++)
      row[plane] = picref->data[plane] +
        picref->linesize[plane] * (y >> vsub);

    for (x = FFMAX(xb, 0); x < (xb + wb) && x < picref->video->w; x++) {
      double alpha = (double)boxtrack->yuv_color[A] / 255;
      if (boxtrack->method == redact_solid) {
        row[0][x] = (1 - alpha) * row[0][x] +
          alpha * boxtrack->yuv_color[Y];
        // todo: if hsub is non-zero this will do the same pixel multiple
        // times, which is wasteful, and wrong if alpha != 1.
        row[1][x >> hsub] = (1 - alpha) * row[1][x >> hsub] +
          alpha * boxtrack->yuv_color[U];
        row[2][x >> hsub] = (1 - alpha) * row[2][x >> hsub] +
          alpha * boxtrack->yuv_color[V];
      } else if (boxtrack->method == redact_pixellate) {
        int x_quant = (x / megapixel_size) * megapixel_size;
        int y_quant = (y / megapixel_size) * megapixel_size;
        row[0][x] = (picref->data[0] + y_quant *
                     picref->linesize[0])[x_quant];
        row[1][x >> hsub] = (picref->data[1] + picref->linesize[1] *
                             (y_quant >> vsub))[x_quant >> hsub];
        row[2][x >> hsub] = (picref->data[2] + picref->linesize[2] *
                             (y_quant >> vsub))[x_quant >> hsub];
      }
    }
  }
}

// Erase the output frame: fill planes 0,1,2 with Y=16, U=V=128 (black).
// (The val parameter is currently unused.)
static void erase_output2(AVFilterBufferRef *outpic,
                          int y0, int h, int hsub, int vsub,
                          unsigned char val) {
  unsigned char v[] = {16, 128, 128};
  for (int y = y0; y < (y0 + h); y++) {
    for (int plane = 0; plane < 3; plane++) {
      uint8_t *outrow = outpic->data[plane] +
        outpic->linesize[plane] * (y >> ((plane == 0) ? 0 : vsub));
      const int xmax = outpic->video->w >> ((plane == 0) ? 0 : hsub);
      memset(outrow, v[plane], xmax);
    }  // plane
  }  // y
}

// Copy the whole frame from picref to outpic.
static void copy_all(AVFilterBufferRef *picref,
                     AVFilterBufferRef *outpic,
                     int hsub, int vsub) {
  for (int y = 0; y < picref->video->h; y++) {
    for (int plane = 0; plane < 3; plane++) {
      uint8_t *row = picref->data[plane] +
        picref->linesize[plane] * (y >> ((plane == 0) ? 0 : vsub));
      uint8_t *outrow = outpic->data[plane] +
        outpic->linesize[plane] * (y >> ((plane == 0) ? 0 : vsub));
      int xwid = picref->video->w >> ((plane == 0) ? 0 : hsub);
      memcpy(outrow, row, xwid);
    }  // plane
  }  // y
}

// Copy one box from the input to the output.
static void copy_one_box(AVFilterBufferRef *picref,
                         AVFilterBufferRef *outpic,
                         BoxTrack *boxtrack,
                         int y0, int h, int hsub, int vsub) {
  int xb = boxtrack->l;
  int yb = boxtrack->t;
  int hb = boxtrack->b - boxtrack->t;
  int wb = boxtrack->r - boxtrack->l;

  for (int y = FFMAX(yb, y0); y < (y0 + h) && y < (yb + hb); y++) {
    for (int plane = 0; plane < 3; plane++) {
      uint8_t *row = picref->data[plane] +
        picref->linesize[plane] * (y >> ((plane == 0) ? 0 : vsub));
      uint8_t *outrow = outpic->data[plane] +
        outpic->linesize[plane] * (y >> ((plane == 0) ? 0 : vsub));
      int xmin = FFMAX(xb, 0) >> ((plane == 0) ? 0 : hsub);
      int xmax = FFMIN((xb + wb), picref->video->w)
        >> ((plane == 0) ? 0 : hsub);
      memcpy(outrow + xmin, row + xmin, xmax - xmin);
    }  // plane
  }  // y
}

static void end_frame(AVFilterLink *inlink)
{
  AVFilterContext *ctx = inlink->dst;
  RedactionContext *redaction = inlink->dst->priv;
  AVFilterBufferRef *picref = inlink->cur_buf;
  AVFilterLink *outlink0 = inlink->dst->outputs[0];
  AVFilterBufferRef *outpic0 = outlink0->out_buf;
  AVFilterLink *outlink1 = inlink->dst->outputs[1];
  AVFilterBufferRef *outpic1 = outlink1->out_buf;
  int box = 0;
  statm_t status;

  if (logging)
    av_log(ctx, AV_LOG_INFO, "vf_redact start\n");
  // Put the original into outpic0.
  copy_all(picref, outpic0, redaction->hsub, redaction->vsub);
  /* erase_output2(inlink->dst->outputs[0]->out_buf, 0, inlink->h, */
  /*                 redaction->hsub, redaction->vsub, 0); */
  // outpic1 is the redaction reversal data, initially blank.
  erase_output2(outpic1, 0, inlink->h, redaction->hsub, redaction->vsub, 40);
  // First back up the boxes to be redacted.
  for (box = redaction->numtracks - 1; box >= 0; --box) {
    BoxTrack *boxtrack = redaction->boxtracks[box];

    // Tracks are sorted by start time, so if this one starts in the future
    // all remaining ones will.
    if (boxtrack->start > redaction->time_seconds)
      break;
    if (boxtrack->end < redaction->time_seconds) {
      // Delete any tracks we've passed.
      av_free(redaction->boxtracks[box]);
      // Shuffle down any still-active tracks higher in the array.
      // (We've already processed them this frame.)
      for (int t = box + 1; t < redaction->numtracks; ++t)
        redaction->boxtracks[t - 1] = redaction->boxtracks[t];
      // Reduce the count.
      redaction->boxtracks[--redaction->numtracks] = NULL;
    } else {
      copy_one_box(picref, outpic1, boxtrack, 0, inlink->h,
                   redaction->hsub, redaction->vsub);
    }
  }
  // Now store the redacted video into outpic0.
  for (box = redaction->numtracks - 1; box >= 0; --box) {
    BoxTrack *boxtrack = redaction->boxtracks[box];

    if (boxtrack->start > redaction->time_seconds)
      break;
    obscure_one_box(picref, outpic0, redaction->lastredacted,
                    boxtrack, 0, inlink->h,
                    redaction->hsub, redaction->vsub,
                    &redaction->random);
  }
  avfilter_draw_slice(inlink->dst->outputs[0], 0, inlink->h, 1);
  avfilter_draw_slice(inlink->dst->outputs[1], 0, inlink->h, 1);
  // Keep track of the previous redacted output.
  if (redaction->lastredacted != NULL)
    avfilter_unref_buffer(redaction->lastredacted);
  redaction->lastredacted = avfilter_ref_buffer(outlink0->out_buf, ~0);
  avfilter_end_frame(inlink->dst->outputs[0]);
  avfilter_end_frame(inlink->dst->outputs[1]);
  avfilter_unref_buffer(picref);
  avfilter_unref_buffer(outlink0->out_buf);
  avfilter_unref_buffer(outlink1->out_buf);
  if (logging) {
    av_log(ctx, AV_LOG_INFO, "doneendframe1\n");
    read_off_memory_status(&status);
    // unsigned long size,resident,share,text,lib,data,dt;
    av_log(ctx, AV_LOG_INFO, "Redaction memory RSS %lu data %lu\n",
           status.resident, status.data);
  }
}

static av_cold void uninit(AVFilterContext *ctx)
{
  RedactionContext *redaction = ctx->priv;
  if (redaction->lastredacted != NULL)
    avfilter_unref_buffer(redaction->lastredacted);
  for (int i = 0; i < redaction->numtracks; ++i) {
    av_free(redaction->boxtracks[i]);
  }
  av_free(redaction->boxtracks);
}

AVFilter avfilter_vf_redact = {
  .name      = "redact",
  .description =
  NULL_IF_CONFIG_SMALL("Redact the input video according to a track file."),
  .priv_size = sizeof(RedactionContext),
  .init      = init,
  .uninit    = uninit,

  .query_formats   = query_formats,
  .inputs    = (const AVFilterPad[]) {
    { .name             = "default",
      .type             = AVMEDIA_TYPE_VIDEO,
      .config_props     = config_input,
      .get_video_buffer =
      avfilter_null_get_video_buffer,
      .start_frame      = start_frame,
      .draw_slice       = avfilter_null_draw_slice,
      .end_frame        = end_frame,
      .min_perms        = AV_PERM_WRITE | AV_PERM_READ,
      // .rej_perms        = AV_PERM_PRESERVE
    },
    { .name = NULL}},
  .outputs   = (const AVFilterPad[]) {
    { .name             = "output1",
      .type             = AVMEDIA_TYPE_VIDEO, },
    { .name             = "output2",
      .type             = AVMEDIA_TYPE_VIDEO, },
    { .name = NULL}},
};